author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/etnaviv
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/etnaviv')
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/etnaviv/Makefile | 19
-rw-r--r--  drivers/gpu/drm/etnaviv/cmdstream.xml.h | 270
-rw-r--r--  drivers/gpu/drm/etnaviv/common.xml.h | 485
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 538
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 206
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 142
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h | 46
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 716
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 123
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 229
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.h | 42
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 731
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 124
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 147
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 632
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 1926
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 186
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 156
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 176
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 314
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 571
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 127
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 583
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_perfmon.h | 38
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 149
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.h | 23
-rw-r--r--  drivers/gpu/drm/etnaviv/state.xml.h | 502
-rw-r--r--  drivers/gpu/drm/etnaviv/state_3d.xml.h | 15
-rw-r--r--  drivers/gpu/drm/etnaviv/state_blt.xml.h | 54
-rw-r--r--  drivers/gpu/drm/etnaviv/state_hi.xml.h | 570
31 files changed, 9864 insertions, 0 deletions
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
new file mode 100644
index 000000000..faa7fc68b
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ETNAVIV
+ tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
+ depends on DRM
+ depends on MMU
+ select SHMEM
+ select SYNC_FILE
+ select THERMAL if DRM_ETNAVIV_THERMAL
+ select TMPFS
+ select WANT_DEV_COREDUMP
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select DRM_SCHED
+ help
+ DRM driver for Vivante GPUs.
+
+config DRM_ETNAVIV_THERMAL
+ bool "enable ETNAVIV thermal throttling"
+ depends on DRM_ETNAVIV
+ default y
+ help
+ Compile in support for thermal throttling.
+ Say Y unless you want to risk burning your SoC.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
new file mode 100644
index 000000000..46e5ffad6
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+etnaviv-y := \
+ etnaviv_buffer.o \
+ etnaviv_cmd_parser.o \
+ etnaviv_cmdbuf.o \
+ etnaviv_drv.o \
+ etnaviv_dump.o \
+ etnaviv_gem_prime.o \
+ etnaviv_gem_submit.o \
+ etnaviv_gem.o \
+ etnaviv_gpu.o \
+ etnaviv_hwdb.o \
+ etnaviv_iommu_v2.o \
+ etnaviv_iommu.o \
+ etnaviv_mmu.o \
+ etnaviv_perfmon.o \
+ etnaviv_sched.o
+
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
new file mode 100644
index 000000000..65f1ba109
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
@@ -0,0 +1,270 @@
+#ifndef CMDSTREAM_XML
+#define CMDSTREAM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- cmdstream.xml ( 14094 bytes, from 2016-11-11 06:55:14)
+- copyright.xml ( 1597 bytes, from 2016-10-29 07:29:22)
+- common.xml ( 23344 bytes, from 2016-11-10 15:14:07)
+
+Copyright (C) 2012-2016 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define FE_OPCODE_LOAD_STATE 0x00000001
+#define FE_OPCODE_END 0x00000002
+#define FE_OPCODE_NOP 0x00000003
+#define FE_OPCODE_DRAW_2D 0x00000004
+#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005
+#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006
+#define FE_OPCODE_WAIT 0x00000007
+#define FE_OPCODE_LINK 0x00000008
+#define FE_OPCODE_STALL 0x00000009
+#define FE_OPCODE_CALL 0x0000000a
+#define FE_OPCODE_RETURN 0x0000000b
+#define FE_OPCODE_DRAW_INSTANCED 0x0000000c
+#define FE_OPCODE_CHIP_SELECT 0x0000000d
+#define PRIMITIVE_TYPE_POINTS 0x00000001
+#define PRIMITIVE_TYPE_LINES 0x00000002
+#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003
+#define PRIMITIVE_TYPE_TRIANGLES 0x00000004
+#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005
+#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006
+#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007
+#define PRIMITIVE_TYPE_QUADS 0x00000008
+#define VIV_FE_LOAD_STATE 0x00000000
+
+#define VIV_FE_LOAD_STATE_HEADER 0x00000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27
+#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000
+#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16
+#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2
+
+#define VIV_FE_END 0x00000000
+
+#define VIV_FE_END_HEADER 0x00000000
+#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f
+#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0
+#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
+#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100
+#define VIV_FE_END_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_END_HEADER_OP__SHIFT 27
+#define VIV_FE_END_HEADER_OP_END 0x10000000
+
+#define VIV_FE_NOP 0x00000000
+
+#define VIV_FE_NOP_HEADER 0x00000000
+#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_NOP_HEADER_OP__SHIFT 27
+#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000
+
+#define VIV_FE_DRAW_2D 0x00000000
+
+#define VIV_FE_DRAW_2D_HEADER 0x00000000
+#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00
+#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8
+#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
+#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000
+
+#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0
+#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
+
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES 0x00000000
+
+#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000
+
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008
+
+#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c
+
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010
+
+#define VIV_FE_WAIT 0x00000000
+
+#define VIV_FE_WAIT_HEADER 0x00000000
+#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff
+#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0
+#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
+#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_WAIT_HEADER_OP__SHIFT 27
+#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000
+
+#define VIV_FE_LINK 0x00000000
+
+#define VIV_FE_LINK_HEADER 0x00000000
+#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff
+#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0
+#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
+#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_LINK_HEADER_OP__SHIFT 27
+#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000
+
+#define VIV_FE_LINK_ADDRESS 0x00000004
+
+#define VIV_FE_STALL 0x00000000
+
+#define VIV_FE_STALL_HEADER 0x00000000
+#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_STALL_HEADER_OP__SHIFT 27
+#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000
+
+#define VIV_FE_STALL_TOKEN 0x00000004
+#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f
+#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0
+#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
+#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00
+#define VIV_FE_STALL_TOKEN_TO__SHIFT 8
+#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
+
+#define VIV_FE_CALL 0x00000000
+
+#define VIV_FE_CALL_HEADER 0x00000000
+#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff
+#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0
+#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
+#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_CALL_HEADER_OP__SHIFT 27
+#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000
+
+#define VIV_FE_CALL_ADDRESS 0x00000004
+
+#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008
+
+#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c
+
+#define VIV_FE_RETURN 0x00000000
+
+#define VIV_FE_RETURN_HEADER 0x00000000
+#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_RETURN_HEADER_OP__SHIFT 27
+#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000
+
+#define VIV_FE_CHIP_SELECT 0x00000000
+
+#define VIV_FE_CHIP_SELECT_HEADER 0x00000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27
+#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
+
+#define VIV_FE_DRAW_INSTANCED 0x00000000
+
+#define VIV_FE_DRAW_INSTANCED_HEADER 0x00000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__MASK 0xf8000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP__SHIFT 27
+#define VIV_FE_DRAW_INSTANCED_HEADER_OP_DRAW_INSTANCED 0x60000000
+#define VIV_FE_DRAW_INSTANCED_HEADER_INDEXED 0x00100000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK 0x000f0000
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT 16
+#define VIV_FE_DRAW_INSTANCED_HEADER_TYPE(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_TYPE__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_TYPE__MASK)
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK 0x0000ffff
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO(x) (((x) << VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__SHIFT) & VIV_FE_DRAW_INSTANCED_HEADER_INSTANCE_COUNT_LO__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_COUNT 0x00000004
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK 0xff000000
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT 24
+#define VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_INSTANCE_COUNT_HI__MASK)
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK 0x00ffffff
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT(x) (((x) << VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__SHIFT) & VIV_FE_DRAW_INSTANCED_COUNT_VERTEX_COUNT__MASK)
+
+#define VIV_FE_DRAW_INSTANCED_START 0x00000008
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__MASK 0xffffffff
+#define VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT 0
+#define VIV_FE_DRAW_INSTANCED_START_INDEX(x) (((x) << VIV_FE_DRAW_INSTANCED_START_INDEX__SHIFT) & VIV_FE_DRAW_INSTANCED_START_INDEX__MASK)
+
+
+#endif /* CMDSTREAM_XML */
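
Before moving on to the other headers and the driver sources, a short orientation: the VIV_FE_* definitions above are the front-end (FE) command-word encodings. The following is a minimal, self-contained sketch, not part of the commit, of how a LOAD_STATE command is composed from these macros; it mirrors the CMD_LOAD_STATE() helper in etnaviv_buffer.c further below, with a hypothetical emit32() helper and a bare array standing in for the real etnaviv_cmdbuf object.

#include <stdint.h>

/*
 * Illustrative only: emit32() and the bare stream array are stand-ins for
 * the driver's etnaviv_cmdbuf handling; the VIV_FE_* macros are the ones
 * defined in cmdstream.xml.h above.
 */
static void emit32(uint32_t *stream, unsigned int *pos, uint32_t word)
{
	stream[(*pos)++] = word;
}

static void emit_load_state(uint32_t *stream, unsigned int *pos,
			    uint32_t reg, uint32_t value)
{
	/* Convert the register byte offset into a state index. */
	uint32_t index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	/* Header word: opcode, number of values to write, state index. */
	emit32(stream, pos, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
			    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
			    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	/* Payload word: the value written to that state register. */
	emit32(stream, pos, value);
}

Each single-value LOAD_STATE is thus two 32-bit words in the stream: one header and one payload.
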
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
new file mode 100644
index 000000000..001faea80
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -0,0 +1,485 @@
+#ifndef COMMON_XML
+#define COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- texdesc_3d.xml ( 3183 bytes, from 2017-12-18 16:51:59)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+
+Copyright (C) 2012-2018 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define PIPE_ID_PIPE_3D 0x00000000
+#define PIPE_ID_PIPE_2D 0x00000001
+#define SYNC_RECIPIENT_FE 0x00000001
+#define SYNC_RECIPIENT_RA 0x00000005
+#define SYNC_RECIPIENT_PE 0x00000007
+#define SYNC_RECIPIENT_DE 0x0000000b
+#define SYNC_RECIPIENT_BLT 0x00000010
+#define ENDIAN_MODE_NO_SWAP 0x00000000
+#define ENDIAN_MODE_SWAP_16 0x00000001
+#define ENDIAN_MODE_SWAP_32 0x00000002
+#define chipModel_GC200 0x00000200
+#define chipModel_GC300 0x00000300
+#define chipModel_GC320 0x00000320
+#define chipModel_GC328 0x00000328
+#define chipModel_GC350 0x00000350
+#define chipModel_GC355 0x00000355
+#define chipModel_GC400 0x00000400
+#define chipModel_GC410 0x00000410
+#define chipModel_GC420 0x00000420
+#define chipModel_GC428 0x00000428
+#define chipModel_GC450 0x00000450
+#define chipModel_GC500 0x00000500
+#define chipModel_GC520 0x00000520
+#define chipModel_GC530 0x00000530
+#define chipModel_GC600 0x00000600
+#define chipModel_GC700 0x00000700
+#define chipModel_GC800 0x00000800
+#define chipModel_GC860 0x00000860
+#define chipModel_GC880 0x00000880
+#define chipModel_GC900 0x00000900
+#define chipModel_GC1000 0x00001000
+#define chipModel_GC1500 0x00001500
+#define chipModel_GC2000 0x00002000
+#define chipModel_GC2100 0x00002100
+#define chipModel_GC2200 0x00002200
+#define chipModel_GC2500 0x00002500
+#define chipModel_GC3000 0x00003000
+#define chipModel_GC4000 0x00004000
+#define chipModel_GC5000 0x00005000
+#define chipModel_GC5200 0x00005200
+#define chipModel_GC6400 0x00006400
+#define chipModel_GC7000 0x00007000
+#define chipModel_GC7400 0x00007400
+#define chipModel_GC8000 0x00008000
+#define chipModel_GC8100 0x00008100
+#define chipModel_GC8200 0x00008200
+#define chipModel_GC8400 0x00008400
+#define RGBA_BITS_R 0x00000001
+#define RGBA_BITS_G 0x00000002
+#define RGBA_BITS_B 0x00000004
+#define RGBA_BITS_A 0x00000008
+#define chipFeatures_FAST_CLEAR 0x00000001
+#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002
+#define chipFeatures_PIPE_3D 0x00000004
+#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008
+#define chipFeatures_DEBUG_MODE 0x00000010
+#define chipFeatures_Z_COMPRESSION 0x00000020
+#define chipFeatures_YUV420_SCALER 0x00000040
+#define chipFeatures_MSAA 0x00000080
+#define chipFeatures_DC 0x00000100
+#define chipFeatures_PIPE_2D 0x00000200
+#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400
+#define chipFeatures_FAST_SCALER 0x00000800
+#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000
+#define chipFeatures_YUV420_TILER 0x00002000
+#define chipFeatures_MODULE_CG 0x00004000
+#define chipFeatures_MIN_AREA 0x00008000
+#define chipFeatures_NO_EARLY_Z 0x00010000
+#define chipFeatures_NO_422_TEXTURE 0x00020000
+#define chipFeatures_BUFFER_INTERLEAVING 0x00040000
+#define chipFeatures_BYTE_WRITE_2D 0x00080000
+#define chipFeatures_NO_SCALER 0x00100000
+#define chipFeatures_YUY2_AVERAGING 0x00200000
+#define chipFeatures_HALF_PE_CACHE 0x00400000
+#define chipFeatures_HALF_TX_CACHE 0x00800000
+#define chipFeatures_YUY2_RENDER_TARGET 0x01000000
+#define chipFeatures_MEM32 0x02000000
+#define chipFeatures_PIPE_VG 0x04000000
+#define chipFeatures_VGTS 0x08000000
+#define chipFeatures_FE20 0x10000000
+#define chipFeatures_BYTE_WRITE_3D 0x20000000
+#define chipFeatures_RS_YUV_TARGET 0x40000000
+#define chipFeatures_32_BIT_INDICES 0x80000000
+#define chipMinorFeatures0_FLIP_Y 0x00000001
+#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002
+#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004
+#define chipMinorFeatures0_TEXTURE_8K 0x00000008
+#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010
+#define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020
+#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040
+#define chipMinorFeatures0_2DPE20 0x00000080
+#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100
+#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200
+#define chipMinorFeatures0_2BITPERTILE 0x00000400
+#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800
+#define chipMinorFeatures0_SUPER_TILED 0x00001000
+#define chipMinorFeatures0_VG_20 0x00002000
+#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000
+#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000
+#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000
+#define chipMinorFeatures0_VG_FILTER 0x00020000
+#define chipMinorFeatures0_VG_21 0x00040000
+#define chipMinorFeatures0_SHADER_HAS_W 0x00080000
+#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000
+#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000
+#define chipMinorFeatures0_MC20 0x00400000
+#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000
+#define chipMinorFeatures0_BUG_FIXES0 0x01000000
+#define chipMinorFeatures0_VAA 0x02000000
+#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000
+#define chipMinorFeatures0_HZ 0x08000000
+#define chipMinorFeatures0_NEW_TEXTURE 0x10000000
+#define chipMinorFeatures0_2D_A8_TARGET 0x20000000
+#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000
+#define chipMinorFeatures0_ENHANCE_VR 0x80000000
+#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001
+#define chipMinorFeatures1_V2_COMPRESSION 0x00000002
+#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004
+#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008
+#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010
+#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020
+#define chipMinorFeatures1_BUG_FIXES3 0x00000040
+#define chipMinorFeatures1_AUTO_DISABLE 0x00000080
+#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100
+#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200
+#define chipMinorFeatures1_L2_WINDOWING 0x00000400
+#define chipMinorFeatures1_HALF_FLOAT 0x00000800
+#define chipMinorFeatures1_PIXEL_DITHER 0x00001000
+#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000
+#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000
+#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000
+#define chipMinorFeatures1_2D_DITHER 0x00010000
+#define chipMinorFeatures1_BUG_FIXES5 0x00020000
+#define chipMinorFeatures1_NEW_2D 0x00040000
+#define chipMinorFeatures1_NEW_FP 0x00080000
+#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000
+#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000
+#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000
+#define chipMinorFeatures1_HALTI0 0x00800000
+#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000
+#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000
+#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000
+#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000
+#define chipMinorFeatures1_MMU_VERSION 0x10000000
+#define chipMinorFeatures1_WIDE_LINE 0x20000000
+#define chipMinorFeatures1_BUG_FIXES6 0x40000000
+#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000
+#define chipMinorFeatures2_LINE_LOOP 0x00000001
+#define chipMinorFeatures2_LOGIC_OP 0x00000002
+#define chipMinorFeatures2_SEAMLESS_CUBE_MAP 0x00000004
+#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008
+#define chipMinorFeatures2_LINEAR_PE 0x00000010
+#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020
+#define chipMinorFeatures2_COMPOSITION 0x00000040
+#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080
+#define chipMinorFeatures2_PE_SWIZZLE 0x00000100
+#define chipMinorFeatures2_END_EVENT 0x00000200
+#define chipMinorFeatures2_S1S8 0x00000400
+#define chipMinorFeatures2_HALTI1 0x00000800
+#define chipMinorFeatures2_RGB888 0x00001000
+#define chipMinorFeatures2_TX__YUV_ASSEMBLER 0x00002000
+#define chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING 0x00004000
+#define chipMinorFeatures2_TX_FILTER 0x00008000
+#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
+#define chipMinorFeatures2_2D_TILING 0x00020000
+#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
+#define chipMinorFeatures2_TILE_FILLER 0x00080000
+#define chipMinorFeatures2_YUV_STANDARD 0x00100000
+#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000
+#define chipMinorFeatures2_YUV_CONVERSION 0x00400000
+#define chipMinorFeatures2_FLUSH_FIXED_2D 0x00800000
+#define chipMinorFeatures2_INTERLEAVER 0x01000000
+#define chipMinorFeatures2_MIXED_STREAMS 0x02000000
+#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000
+#define chipMinorFeatures2_BUG_FIXES7 0x08000000
+#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000
+#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000
+#define chipMinorFeatures2_DECOMPRESS_Z16 0x40000000
+#define chipMinorFeatures2_BUG_FIXES8 0x80000000
+#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001
+#define chipMinorFeatures3_OCL_ONLY 0x00000002
+#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004
+#define chipMinorFeatures3_INSTRUCTION_CACHE 0x00000008
+#define chipMinorFeatures3_GEOMETRY_SHADER 0x00000010
+#define chipMinorFeatures3_TEX_COMPRESSION_SUPERTILED 0x00000020
+#define chipMinorFeatures3_GENERICS 0x00000040
+#define chipMinorFeatures3_BUG_FIXES9 0x00000080
+#define chipMinorFeatures3_FAST_MSAA 0x00000100
+#define chipMinorFeatures3_WCLIP 0x00000200
+#define chipMinorFeatures3_BUG_FIXES10 0x00000400
+#define chipMinorFeatures3_UNIFIED_SAMPLERS 0x00000800
+#define chipMinorFeatures3_BUG_FIXES11 0x00001000
+#define chipMinorFeatures3_PERFORMANCE_COUNTERS 0x00002000
+#define chipMinorFeatures3_HAS_FAST_TRANSCENDENTALS 0x00004000
+#define chipMinorFeatures3_BUG_FIXES12 0x00008000
+#define chipMinorFeatures3_BUG_FIXES13 0x00010000
+#define chipMinorFeatures3_DE_ENHANCEMENTS1 0x00020000
+#define chipMinorFeatures3_ACE 0x00040000
+#define chipMinorFeatures3_TX_ENHANCEMENTS1 0x00080000
+#define chipMinorFeatures3_SH_ENHANCEMENTS1 0x00100000
+#define chipMinorFeatures3_SH_ENHANCEMENTS2 0x00200000
+#define chipMinorFeatures3_PE_ENHANCEMENTS1 0x00400000
+#define chipMinorFeatures3_2D_FC_SOURCE 0x00800000
+#define chipMinorFeatures3_BUG_FIXES_14 0x01000000
+#define chipMinorFeatures3_POWER_OPTIMIZATIONS_0 0x02000000
+#define chipMinorFeatures3_NEW_HZ 0x04000000
+#define chipMinorFeatures3_PE_DITHER_FIX 0x08000000
+#define chipMinorFeatures3_DE_ENHANCEMENTS3 0x10000000
+#define chipMinorFeatures3_SH_ENHANCEMENTS3 0x20000000
+#define chipMinorFeatures3_SH_ENHANCEMENTS4 0x40000000
+#define chipMinorFeatures3_TX_ENHANCEMENTS2 0x80000000
+#define chipMinorFeatures4_FE_ENHANCEMENTS1 0x00000001
+#define chipMinorFeatures4_PE_ENHANCEMENTS2 0x00000002
+#define chipMinorFeatures4_FRUSTUM_CLIP_FIX 0x00000004
+#define chipMinorFeatures4_DE_NO_GAMMA 0x00000008
+#define chipMinorFeatures4_PA_ENHANCEMENTS_2 0x00000010
+#define chipMinorFeatures4_2D_GAMMA 0x00000020
+#define chipMinorFeatures4_SINGLE_BUFFER 0x00000040
+#define chipMinorFeatures4_HI_ENHANCEMENTS_1 0x00000080
+#define chipMinorFeatures4_TX_ENHANCEMENTS_3 0x00000100
+#define chipMinorFeatures4_SH_ENHANCEMENTS_5 0x00000200
+#define chipMinorFeatures4_FE_ENHANCEMENTS_2 0x00000400
+#define chipMinorFeatures4_TX_LERP_PRECISION_FIX 0x00000800
+#define chipMinorFeatures4_2D_COLOR_SPACE_CONVERSION 0x00001000
+#define chipMinorFeatures4_TEXTURE_ASTC 0x00002000
+#define chipMinorFeatures4_PE_ENHANCEMENTS_4 0x00004000
+#define chipMinorFeatures4_MC_ENHANCEMENTS_1 0x00008000
+#define chipMinorFeatures4_HALTI2 0x00010000
+#define chipMinorFeatures4_2D_MIRROR_EXTENSION 0x00020000
+#define chipMinorFeatures4_SMALL_MSAA 0x00040000
+#define chipMinorFeatures4_BUG_FIXES_17 0x00080000
+#define chipMinorFeatures4_NEW_RA 0x00100000
+#define chipMinorFeatures4_2D_OPF_YUV_OUTPUT 0x00200000
+#define chipMinorFeatures4_2D_MULTI_SOURCE_BLT_EX2 0x00400000
+#define chipMinorFeatures4_NO_USER_CSC 0x00800000
+#define chipMinorFeatures4_ZFIXES 0x01000000
+#define chipMinorFeatures4_BUG_FIXES18 0x02000000
+#define chipMinorFeatures4_2D_COMPRESSION 0x04000000
+#define chipMinorFeatures4_PROBE 0x08000000
+#define chipMinorFeatures4_MEDIUM_PRECISION 0x10000000
+#define chipMinorFeatures4_2D_SUPER_TILE_VERSION 0x20000000
+#define chipMinorFeatures4_BUG_FIXES19 0x40000000
+#define chipMinorFeatures4_SH_ENHANCEMENTS6 0x80000000
+#define chipMinorFeatures5_SH_ENHANCEMENTS7 0x00000001
+#define chipMinorFeatures5_BUG_FIXES20 0x00000002
+#define chipMinorFeatures5_DE_ADDRESS_40 0x00000004
+#define chipMinorFeatures5_MINI_MMU_FIX 0x00000008
+#define chipMinorFeatures5_EEZ 0x00000010
+#define chipMinorFeatures5_BUG_FIXES21 0x00000020
+#define chipMinorFeatures5_EXTRA_VG_CAPS 0x00000040
+#define chipMinorFeatures5_MULTI_SRC_V15 0x00000080
+#define chipMinorFeatures5_BUG_FIXES22 0x00000100
+#define chipMinorFeatures5_HALTI3 0x00000200
+#define chipMinorFeatures5_TESSELATION_SHADERS 0x00000400
+#define chipMinorFeatures5_2D_ONE_PASS_FILTER_TAP 0x00000800
+#define chipMinorFeatures5_MULTI_SRC_V2_STR_QUAD 0x00001000
+#define chipMinorFeatures5_SEPARATE_SRC_DST 0x00002000
+#define chipMinorFeatures5_HALTI4 0x00004000
+#define chipMinorFeatures5_RA_WRITE_DEPTH 0x00008000
+#define chipMinorFeatures5_ANDROID_ONLY 0x00010000
+#define chipMinorFeatures5_HAS_PRODUCTID 0x00020000
+#define chipMinorFeatures5_TX_SUPPORT_DEC 0x00040000
+#define chipMinorFeatures5_S8_MSAA_COMPRESSION 0x00080000
+#define chipMinorFeatures5_PE_DITHER_FIX2 0x00100000
+#define chipMinorFeatures5_L2_CACHE_REMOVE 0x00200000
+#define chipMinorFeatures5_FE_ALLOW_RND_VTX_CNT 0x00400000
+#define chipMinorFeatures5_CUBE_MAP_FL28 0x00800000
+#define chipMinorFeatures5_TX_6BIT_FRAC 0x01000000
+#define chipMinorFeatures5_FE_ALLOW_STALL_PREFETCH_ENG 0x02000000
+#define chipMinorFeatures5_THIRD_PARTY_COMPRESSION 0x04000000
+#define chipMinorFeatures5_RS_DEPTHSTENCIL_NATIVE_SUPPORT 0x08000000
+#define chipMinorFeatures5_V2_MSAA_COMP_FIX 0x10000000
+#define chipMinorFeatures5_HALTI5 0x20000000
+#define chipMinorFeatures5_EVIS 0x40000000
+#define chipMinorFeatures5_BLT_ENGINE 0x80000000
+#define chipMinorFeatures6_BUG_FIXES_23 0x00000001
+#define chipMinorFeatures6_BUG_FIXES_24 0x00000002
+#define chipMinorFeatures6_DEC 0x00000004
+#define chipMinorFeatures6_VS_TILE_NV12 0x00000008
+#define chipMinorFeatures6_VS_TILE_NV12_10BIT 0x00000010
+#define chipMinorFeatures6_RENDER_TARGET_8 0x00000020
+#define chipMinorFeatures6_TEX_LOD_FLOW_CORR 0x00000040
+#define chipMinorFeatures6_FACE_LOD 0x00000080
+#define chipMinorFeatures6_MULTI_CORE_SEMAPHORE_STALL_V2 0x00000100
+#define chipMinorFeatures6_VMSAA 0x00000200
+#define chipMinorFeatures6_CHIP_ENABLE_LINK 0x00000400
+#define chipMinorFeatures6_MULTI_SRC_BLT_1_5_ENHANCEMENT 0x00000800
+#define chipMinorFeatures6_MULTI_SRC_BLT_BILINEAR_FILTER 0x00001000
+#define chipMinorFeatures6_RA_HZEZ_CLOCK_CONTROL 0x00002000
+#define chipMinorFeatures6_CACHE128B256BPERLINE 0x00004000
+#define chipMinorFeatures6_V4_COMPRESSION 0x00008000
+#define chipMinorFeatures6_PE2D_MAJOR_SUPER_TILE 0x00010000
+#define chipMinorFeatures6_PE_32BPC_COLORMASK_FIX 0x00020000
+#define chipMinorFeatures6_ALPHA_BLENDING_OPT 0x00040000
+#define chipMinorFeatures6_NEW_GPIPE 0x00080000
+#define chipMinorFeatures6_PIPELINE_32_ATTRIBUTES 0x00100000
+#define chipMinorFeatures6_MSAA_SHADING 0x00200000
+#define chipMinorFeatures6_NO_ANISTRO_FILTER 0x00400000
+#define chipMinorFeatures6_NO_ASTC 0x00800000
+#define chipMinorFeatures6_NO_DXT 0x01000000
+#define chipMinorFeatures6_HWTFB 0x02000000
+#define chipMinorFeatures6_RA_DEPTH_WRITE_MSAA1X_FIX 0x04000000
+#define chipMinorFeatures6_EZHZ_CLOCKGATE_FIX 0x08000000
+#define chipMinorFeatures6_SH_SNAP2PAGE_FIX 0x10000000
+#define chipMinorFeatures6_SH_HALFDEPENDENCY_FIX 0x20000000
+#define chipMinorFeatures6_USC_MCFILL_FIX 0x40000000
+#define chipMinorFeatures6_TPG_TCPERF_FIX 0x80000000
+#define chipMinorFeatures7_USC_MDFIFO_OVERFLOW_FIX 0x00000001
+#define chipMinorFeatures7_SH_TEXLD_BARRIER_IN_CS_FIX 0x00000002
+#define chipMinorFeatures7_RS_NEW_BASEADDR 0x00000004
+#define chipMinorFeatures7_PE_8BPP_DUALPIPE_FIX 0x00000008
+#define chipMinorFeatures7_SH_ADVANCED_INSTR 0x00000010
+#define chipMinorFeatures7_SH_FLAT_INTERPOLATION_DUAL16_FIX 0x00000020
+#define chipMinorFeatures7_USC_CONTINUOUS_FLUS_FIX 0x00000040
+#define chipMinorFeatures7_SH_SUPPORT_V4 0x00000080
+#define chipMinorFeatures7_SH_SUPPORT_ALPHA_KILL 0x00000100
+#define chipMinorFeatures7_PE_NO_ALPHA_TEST 0x00000200
+#define chipMinorFeatures7_TX_LOD_NEAREST_SELECT 0x00000400
+#define chipMinorFeatures7_SH_FIX_LDEXP 0x00000800
+#define chipMinorFeatures7_SUPPORT_MOVAI 0x00001000
+#define chipMinorFeatures7_SH_SNAP2PAGE_MAXPAGES_FIX 0x00002000
+#define chipMinorFeatures7_PE_RGBA16I_FIX 0x00004000
+#define chipMinorFeatures7_BLT_8bpp_256TILE_FC_FIX 0x00008000
+#define chipMinorFeatures7_PE_64BIT_FENCE_FIX 0x00010000
+#define chipMinorFeatures7_USC_FULL_CACHE_FIX 0x00020000
+#define chipMinorFeatures7_TX_YUV_ASSEMBLER_10BIT 0x00040000
+#define chipMinorFeatures7_FE_32BIT_INDEX_FIX 0x00080000
+#define chipMinorFeatures7_BLT_64BPP_MASKED_CLEAR_FIX 0x00100000
+#define chipMinorFeatures7_BIT_SECURITY 0x00200000
+#define chipMinorFeatures7_BIT_ROBUSTNESS 0x00400000
+#define chipMinorFeatures7_USC_ATOMIC_FIX 0x00800000
+#define chipMinorFeatures7_SH_PSO_MSAA1x_FIX 0x01000000
+#define chipMinorFeatures7_BIT_USC_VX_PERF_FIX 0x02000000
+#define chipMinorFeatures7_EVIS_NO_ABSDIFF 0x04000000
+#define chipMinorFeatures7_EVIS_NO_BITREPLACE 0x08000000
+#define chipMinorFeatures7_EVIS_NO_BOXFILTER 0x10000000
+#define chipMinorFeatures7_EVIS_NO_CORDIAC 0x20000000
+#define chipMinorFeatures7_EVIS_NO_DP32 0x40000000
+#define chipMinorFeatures7_EVIS_NO_FILTER 0x80000000
+#define chipMinorFeatures8_EVIS_NO_IADD 0x00000001
+#define chipMinorFeatures8_EVIS_NO_SELECTADD 0x00000002
+#define chipMinorFeatures8_EVIS_LERP_7OUTPUT 0x00000004
+#define chipMinorFeatures8_EVIS_ACCSQ_8OUTPUT 0x00000008
+#define chipMinorFeatures8_USC_GOS_ADDR_FIX 0x00000010
+#define chipMinorFeatures8_TX_8BIT_UVFRAC 0x00000020
+#define chipMinorFeatures8_TX_DESC_CACHE_CLOCKGATE_FIX 0x00000040
+#define chipMinorFeatures8_RSBLT_MSAA_DECOMPRESSION 0x00000080
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE 0x00000100
+#define chipMinorFeatures8_DRAWID 0x00000200
+#define chipMinorFeatures8_PSIO_SAMPLEMASK_IN_R0ZW_FIX 0x00000400
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE_V2 0x00000800
+#define chipMinorFeatures8_MULTI_CORE_BLOCK_SET_CONFIG 0x00001000
+#define chipMinorFeatures8_VG_RESOLVE_ENGINE 0x00002000
+#define chipMinorFeatures8_VG_PE_COLOR_KEY 0x00004000
+#define chipMinorFeatures8_VG_IM_INDEX_FORMAT 0x00008000
+#define chipMinorFeatures8_SNAPPAGE_CMD 0x00010000
+#define chipMinorFeatures8_SH_NO_INDEX_CONST_ON_A0 0x00020000
+#define chipMinorFeatures8_SH_NO_ONECONST_LIMIT 0x00040000
+#define chipMinorFeatures8_SH_IMG_LDST_ON_TEMP 0x00080000
+#define chipMinorFeatures8_COMPUTE_ONLY 0x00100000
+#define chipMinorFeatures8_SH_IMG_LDST_CLAMP 0x00200000
+#define chipMinorFeatures8_SH_ICACHE_ALLOC_COUNT_FIX 0x00400000
+#define chipMinorFeatures8_SH_ICACHE_PREFETCH 0x00800000
+#define chipMinorFeatures8_PE2D_SEPARATE_CACHE 0x01000000
+#define chipMinorFeatures8_VG_AYUV_INPUT_OUTPUT 0x02000000
+#define chipMinorFeatures8_VG_DOUBLE_IMAGE 0x04000000
+#define chipMinorFeatures8_VG_RECTANGLE_STRIPE_MODE 0x08000000
+#define chipMinorFeatures8_VG_MMU 0x10000000
+#define chipMinorFeatures8_VG_IM_FILTER 0x20000000
+#define chipMinorFeatures8_VG_IM_YUV_PACKET 0x40000000
+#define chipMinorFeatures8_VG_IM_YUV_PLANAR 0x80000000
+#define chipMinorFeatures9_VG_PE_YUV_PACKET 0x00000001
+#define chipMinorFeatures9_VG_COLOR_PRECISION_8_BIT 0x00000002
+#define chipMinorFeatures9_PE_MSAA_OQ_FIX 0x00000004
+#define chipMinorFeatures9_PSIO_MSAA_CL_FIX 0x00000008
+#define chipMinorFeatures9_USC_DEFER_FILL_FIX 0x00000010
+#define chipMinorFeatures9_SH_CLOCK_GATE_FIX 0x00000020
+#define chipMinorFeatures9_FE_NEED_DUMMYDRAW 0x00000040
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_OUTPUT 0x00000080
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_10BIT 0x00000100
+#define chipMinorFeatures9_MULTI_CLUSTER 0x00000200
+#define chipMinorFeatures9_VG_TS_CULLING 0x00000400
+#define chipMinorFeatures9_VG_FP25 0x00000800
+#define chipMinorFeatures9_SH_MULTI_WG_PACK 0x00001000
+#define chipMinorFeatures9_SH_DUAL16_SAMPLEMASK_ZW 0x00002000
+#define chipMinorFeatures9_TPG_TRIVIAL_MODE_FIX 0x00004000
+#define chipMinorFeatures9_TX_ASTC_MULTISLICE_FIX 0x00008000
+#define chipMinorFeatures9_FE_ROBUST_FIX 0x00010000
+#define chipMinorFeatures9_SH_GPIPE_ACCESS_FULLTEMPS 0x00020000
+#define chipMinorFeatures9_PSIO_INTERLOCK 0x00040000
+#define chipMinorFeatures9_PA_WIDELINE_FIX 0x00080000
+#define chipMinorFeatures9_WIDELINE_HELPER_FIX 0x00100000
+#define chipMinorFeatures9_G2D_3RD_PARTY_COMPRESSION_1_1 0x00200000
+#define chipMinorFeatures9_TX_FLUSH_L1CACHE 0x00400000
+#define chipMinorFeatures9_PE_DITHER_FIX2 0x00800000
+#define chipMinorFeatures9_G2D_DEC400 0x01000000
+#define chipMinorFeatures9_SH_TEXLD_U_FIX 0x02000000
+#define chipMinorFeatures9_MC_FCCACHE_BYTEMASK 0x04000000
+#define chipMinorFeatures9_SH_MULTI_WG_PACK_FIX 0x08000000
+#define chipMinorFeatures9_DC_OVERLAY_SCALING 0x10000000
+#define chipMinorFeatures9_DC_SOURCE_ROTATION 0x20000000
+#define chipMinorFeatures9_DC_TILED 0x40000000
+#define chipMinorFeatures9_DC_YUV_L1 0x80000000
+#define chipMinorFeatures10_DC_D30_OUTPUT 0x00000001
+#define chipMinorFeatures10_DC_MMU 0x00000002
+#define chipMinorFeatures10_DC_COMPRESSION 0x00000004
+#define chipMinorFeatures10_DC_QOS 0x00000008
+#define chipMinorFeatures10_PE_ADVANCE_BLEND_PART0 0x00000010
+#define chipMinorFeatures10_FE_PATCHLIST_FETCH_FIX 0x00000020
+#define chipMinorFeatures10_RA_CG_FIX 0x00000040
+#define chipMinorFeatures10_EVIS_VX2 0x00000080
+#define chipMinorFeatures10_NN_FLOAT 0x00000100
+#define chipMinorFeatures10_DEC400 0x00000200
+#define chipMinorFeatures10_LS_SUPPORT_PERCOMP_DEPENDENCY 0x00000400
+#define chipMinorFeatures10_TP_ENGINE 0x00000800
+#define chipMinorFeatures10_MULTI_CORE_BLOCK_SET_CONFIG2 0x00001000
+#define chipMinorFeatures10_PE_VMSAA_COVERAGE_CACHE_FIX 0x00002000
+#define chipMinorFeatures10_SECURITY_AHB 0x00004000
+#define chipMinorFeatures10_MULTICORE_SEMAPHORESTALL_V3 0x00008000
+#define chipMinorFeatures10_SMALLBATCH 0x00010000
+#define chipMinorFeatures10_SH_CMPLX 0x00020000
+#define chipMinorFeatures10_SH_IDIV0_SWZL_EHS 0x00040000
+#define chipMinorFeatures10_TX_LERP_LESS_BIT 0x00080000
+#define chipMinorFeatures10_SH_GM_ENDIAN 0x00100000
+#define chipMinorFeatures10_SH_GM_USC_UNALLOC 0x00200000
+#define chipMinorFeatures10_SH_END_OF_BB 0x00400000
+#define chipMinorFeatures10_VIP_V7 0x00800000
+#define chipMinorFeatures10_TX_BORDER_CLAMP_FIX 0x01000000
+#define chipMinorFeatures10_SH_IMG_LD_LASTPIXEL_FIX 0x02000000
+#define chipMinorFeatures10_ASYNC_BLT 0x04000000
+#define chipMinorFeatures10_ASYNC_FE_FENCE_FIX 0x08000000
+#define chipMinorFeatures10_PSCS_THROTTLE 0x10000000
+#define chipMinorFeatures10_SEPARATE_LS 0x20000000
+#define chipMinorFeatures10_MCFE 0x40000000
+#define chipMinorFeatures10_WIDELINE_TRIANGLE_EMU 0x80000000
+#define chipMinorFeatures11_VG_RESOLUTION_8K 0x00000001
+#define chipMinorFeatures11_FENCE_32BIT 0x00000002
+#define chipMinorFeatures11_FENCE_64BIT 0x00000004
+#define chipMinorFeatures11_NN_INTERLEVE8 0x00000008
+#define chipMinorFeatures11_TP_REORDER 0x00000010
+#define chipMinorFeatures11_PE_DEPTH_ONLY_OQFIX 0x00000020
+
+#endif /* COMMON_XML */
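
The chipModel_* numbers and the chipFeatures_*/chipMinorFeaturesN_* masks above are values the GPU reports at probe time; the driver caches them in gpu->identity and tests them as plain bitmasks (see, for example, the chipMinorFeatures5_BLT_ENGINE check in etnaviv_buffer.c below). A minimal sketch of that pattern, using a reduced, hypothetical stand-in for the real identity structure:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical, reduced stand-in for the identity fields the code below
 * consults (gpu->identity.features, gpu->identity.minor_features5).
 */
struct demo_identity {
	uint32_t features;        /* chipFeatures_* bits */
	uint32_t minor_features5; /* chipMinorFeatures5_* bits */
};

static bool demo_has_3d_pipe(const struct demo_identity *id)
{
	return id->features & chipFeatures_PIPE_3D;
}

static bool demo_has_blt_engine(const struct demo_identity *id)
{
	/* The same test etnaviv_buffer_end()/etnaviv_buffer_queue() use. */
	return id->minor_features5 & chipMinorFeatures5_BLT_ENGINE;
}
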
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
new file mode 100644
index 000000000..cf741c5c8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Etnaviv Project
+ */
+
+#include <drm/drm_drv.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+
+#include "common.xml.h"
+#include "state.xml.h"
+#include "state_blt.xml.h"
+#include "state_hi.xml.h"
+#include "state_3d.xml.h"
+#include "cmdstream.xml.h"
+
+/*
+ * Command Buffer helper:
+ */
+
+
+static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
+{
+ u32 *vaddr = (u32 *)buffer->vaddr;
+
+ BUG_ON(buffer->user_size >= buffer->size);
+
+ vaddr[buffer->user_size / 4] = data;
+ buffer->user_size += 4;
+}
+
+static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
+ u32 reg, u32 value)
+{
+ u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ /* write a register via cmd stream */
+ OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+ VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
+ VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
+ OUT(buffer, value);
+}
+
+static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_END_HEADER_OP_END);
+}
+
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+}
+
+static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
+ u16 prefetch, u32 address)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(prefetch));
+ OUT(buffer, address);
+}
+
+static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
+ u32 from, u32 to)
+{
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
+ OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
+}
+
+static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
+{
+ CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
+ VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
+ VIVS_GL_SEMAPHORE_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+ u32 flush = 0;
+
+ lockdep_assert_held(&gpu->lock);
+
+ /*
+ * This assumes that if we're switching to 2D, we're switching
+ * away from 3D, and vice versa. Hence, if we're switching to
+ * the 2D core, we need to flush the 3D depth and color caches,
+ * otherwise we need to flush the 2D pixel engine cache.
+ */
+ if (gpu->exec_state == ETNA_PIPE_2D)
+ flush = VIVS_GL_FLUSH_CACHE_PE2D;
+ else if (gpu->exec_state == ETNA_PIPE_3D)
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
+
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(pipe));
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf, u32 off, u32 len)
+{
+ u32 size = buf->size;
+ u32 *ptr = buf->vaddr + off;
+
+ dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
+ ptr, etnaviv_cmdbuf_get_va(buf,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ off, size - len * 4 - off);
+
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+ ptr, len * 4, 0);
+}
+
+/*
+ * Safely replace the WAIT of a waitlink with a new command and argument.
+ * The GPU may be executing this WAIT while we're modifying it, so we have
+ * to write it in a specific order to avoid the GPU branching to somewhere
+ * else. 'wl_offset' is the offset to the first byte of the WAIT command.
+ */
+static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
+ unsigned int wl_offset, u32 cmd, u32 arg)
+{
+ u32 *lw = buffer->vaddr + wl_offset;
+
+ lw[1] = arg;
+ mb();
+ lw[0] = cmd;
+ mb();
+}
+
+/*
+ * Ensure that there is space in the command buffer to contiguously write
+ * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
+ */
+static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
+{
+ if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
+ buffer->user_size = 0;
+
+ return etnaviv_cmdbuf_get_va(buffer,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ buffer->user_size;
+}
+
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
+
+ /* initialize buffer */
+ buffer->user_size = 0;
+
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
+
+ return buffer->user_size / 8;
+}
+
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
+
+ buffer->user_size = 0;
+
+ if (gpu->identity.features & chipFeatures_PIPE_3D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ }
+
+ if (gpu->identity.features & chipFeatures_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ }
+
+ CMD_END(buffer);
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ return buffer->user_size / 8;
+}
+
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
+
+ buffer->user_size = 0;
+
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
+
+ CMD_END(buffer);
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ return buffer->user_size / 8;
+}
+
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 link_target, flush = 0;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
+
+ lockdep_assert_held(&gpu->lock);
+
+ if (gpu->exec_state == ETNA_PIPE_2D)
+ flush = VIVS_GL_FLUSH_CACHE_PE2D;
+ else if (gpu->exec_state == ETNA_PIPE_3D)
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR |
+ VIVS_GL_FLUSH_CACHE_TEXTURE |
+ VIVS_GL_FLUSH_CACHE_TEXTUREVS |
+ VIVS_GL_FLUSH_CACHE_SHADER_L2;
+
+ if (flush) {
+ unsigned int dwords = 7;
+
+ if (has_blt)
+ dwords += 10;
+
+ link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+ if (gpu->exec_state == ETNA_PIPE_3D) {
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
+ CMD_END(buffer);
+
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(dwords),
+ link_target);
+ } else {
+ /* Replace the last link-wait with an "END" command */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_END_HEADER_OP_END, 0);
+ }
+}
+
+/* Append a 'sync point' to the ring buffer. */
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 dwords, target;
+
+ lockdep_assert_held(&gpu->lock);
+
+ /*
+ * We need at most 4 dwords in the return target:
+ * 1 event + 1 end + 1 wait + 1 link.
+ */
+ dwords = 4;
+ target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+ /* Signal sync point event */
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
+
+ /* Stop the FE to 'pause' the GPU */
+ CMD_END(buffer);
+
+ /* Append waitlink */
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
+
+ /*
+ * Kick off the 'sync point' command by replacing the previous
+ * WAIT with a link to the address in the ring buffer.
+ */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(dwords),
+ target);
+}
+
+/* Append a command buffer to the ring buffer. */
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ struct etnaviv_iommu_context *mmu_context, unsigned int event,
+ struct etnaviv_cmdbuf *cmdbuf)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 return_target, return_dwords;
+ u32 link_target, link_dwords;
+ bool switch_context = gpu->exec_state != exec_state;
+ bool switch_mmu_context = gpu->mmu_context != mmu_context;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
+
+ lockdep_assert_held(&gpu->lock);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
+ link_dwords = cmdbuf->size / 8;
+
+ /*
+ * If we need maintenance prior to submitting this buffer, we will
+ * need to append a mmu flush load state, followed by a new
+ * link to this buffer - a total of four additional words.
+ */
+ if (need_flush || switch_context) {
+ u32 target, extra_dwords;
+
+ /* link command */
+ extra_dwords = 1;
+
+ /* flush command */
+ if (need_flush) {
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
+ extra_dwords += 1;
+ else
+ extra_dwords += 3;
+ }
+
+ /* pipe switch commands */
+ if (switch_context)
+ extra_dwords += 4;
+
+ /* PTA load command */
+ if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
+ extra_dwords += 1;
+
+ target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
+ /*
+ * Switch MMU context if necessary. Must be done after the
+ * link target has been calculated, as the jump forward in the
+ * kernel ring still uses the last active MMU context before
+ * the switch.
+ */
+ if (switch_mmu_context) {
+ struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
+ etnaviv_iommu_context_put(old_context);
+ }
+
+ if (need_flush) {
+ /* Add the MMU flush */
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+ VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+ VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+ } else {
+ u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+ VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;
+
+ if (switch_mmu_context &&
+ gpu->sec_mode == ETNA_SEC_KERNEL) {
+ unsigned short id =
+ etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
+ CMD_LOAD_STATE(buffer,
+ VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
+ }
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);
+
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ flush);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE,
+ SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE,
+ SYNC_RECIPIENT_PE);
+ }
+
+ gpu->flush_seq = new_flush_seq;
+ }
+
+ if (switch_context) {
+ etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
+ gpu->exec_state = exec_state;
+ }
+
+ /* And the link to the submitted buffer */
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
+ CMD_LINK(buffer, link_dwords, link_target);
+
+ /* Update the link target to point to above instructions */
+ link_target = target;
+ link_dwords = extra_dwords;
+ }
+
+ /*
+ * Append a LINK to the submitted command buffer to return to
+ * the ring buffer. return_target is the ring target address.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
+ */
+ return_dwords = 7;
+
+ /*
+ * When the BLT engine is present we need 6 more dwords in the return
+ * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
+ * but we don't need the normal TS flush state.
+ */
+ if (has_blt)
+ return_dwords += 6;
+
+ return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
+ CMD_LINK(cmdbuf, return_dwords, return_target);
+
+ /*
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
+ */
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
+
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+ return_target,
+ etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
+ cmdbuf->vaddr);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+ cmdbuf->vaddr, cmdbuf->size, 0);
+
+ pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
+ pr_info("addr: 0x%08x\n", link_target);
+ pr_info("back: 0x%08x\n", return_target);
+ pr_info("event: %d\n", event);
+ }
+
+ /*
+ * Kick off the submitted command by replacing the previous
+ * WAIT with a link to the address in the ring buffer.
+ */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
+ link_target);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
new file mode 100644
index 000000000..b106e8b28
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <linux/kernel.h>
+
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+
+#include "cmdstream.xml.h"
+
+#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
+
+struct etna_validation_state {
+ struct etnaviv_gpu *gpu;
+ const struct drm_etnaviv_gem_submit_reloc *relocs;
+ unsigned int num_relocs;
+ u32 *start;
+};
+
+static const struct {
+ u16 offset;
+ u16 size;
+} etnaviv_sensitive_states[] __initconst = {
+#define ST(start, num) { (start) >> 2, (num) }
+ /* 2D */
+ ST(0x1200, 1),
+ ST(0x1228, 1),
+ ST(0x1238, 1),
+ ST(0x1284, 1),
+ ST(0x128c, 1),
+ ST(0x1304, 1),
+ ST(0x1310, 1),
+ ST(0x1318, 1),
+ ST(0x12800, 4),
+ ST(0x128a0, 4),
+ ST(0x128c0, 4),
+ ST(0x12970, 4),
+ ST(0x12a00, 8),
+ ST(0x12b40, 8),
+ ST(0x12b80, 8),
+ ST(0x12ce0, 8),
+ /* 3D */
+ ST(0x0644, 1),
+ ST(0x064c, 1),
+ ST(0x0680, 8),
+ ST(0x086c, 1),
+ ST(0x1028, 1),
+ ST(0x1410, 1),
+ ST(0x1430, 1),
+ ST(0x1458, 1),
+ ST(0x1460, 8),
+ ST(0x1480, 8),
+ ST(0x1500, 8),
+ ST(0x1520, 8),
+ ST(0x1608, 1),
+ ST(0x1610, 1),
+ ST(0x1658, 1),
+ ST(0x165c, 1),
+ ST(0x1664, 1),
+ ST(0x1668, 1),
+ ST(0x16a4, 1),
+ ST(0x16c0, 8),
+ ST(0x16e0, 8),
+ ST(0x1740, 8),
+ ST(0x17c0, 8),
+ ST(0x17e0, 8),
+ ST(0x2400, 14 * 16),
+ ST(0x3824, 1),
+ ST(0x10800, 32 * 16),
+ ST(0x14600, 16),
+ ST(0x14800, 8 * 8),
+#undef ST
+};
+
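+/*
+ * Bitmap with one bit per state dword offset; the bits covering the
+ * sensitive state ranges above are set once at module init time.
+ */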
+#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
+static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
+
+void __init etnaviv_validate_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
+ bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
+ etnaviv_sensitive_states[i].size);
+}
+
+static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
+ unsigned int buf_offset, unsigned int state_addr)
+{
+ if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
+ dev_warn_once(state->gpu->dev,
+ "%s: relocation for non-sensitive state 0x%x at offset %u\n",
+ __func__, state_addr,
+ state->relocs->submit_offset);
+ while (state->num_relocs &&
+ state->relocs->submit_offset < buf_offset) {
+ state->relocs++;
+ state->num_relocs--;
+ }
+ }
+}
+
+static bool etnaviv_validate_load_state(struct etna_validation_state *state,
+ u32 *ptr, unsigned int state_offset, unsigned int num)
+{
+ unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
+ unsigned int st_offset = state_offset, buf_offset;
+
+ for_each_set_bit_from(st_offset, etnaviv_states, size) {
+ buf_offset = (ptr - state->start +
+ st_offset - state_offset) * 4;
+
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
+ if (state->num_relocs &&
+ state->relocs->submit_offset == buf_offset) {
+ state->relocs++;
+ state->num_relocs--;
+ continue;
+ }
+
+ dev_warn_ratelimited(state->gpu->dev,
+ "%s: load state touches restricted state 0x%x at offset %u\n",
+ __func__, st_offset * 4, buf_offset);
+ return false;
+ }
+
+ if (state->num_relocs) {
+ buf_offset = (ptr - state->start + num) * 4;
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
+ state->relocs->submit_offset -
+ buf_offset);
+ }
+
+ return true;
+}
+
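+/*
+ * Fixed command lengths in 32-bit words. LOAD_STATE and DRAW_2D are
+ * variable length and handled separately in the validator; any other
+ * opcode with a zero entry here is rejected.
+ */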
+static uint8_t cmd_length[32] = {
+ [FE_OPCODE_DRAW_PRIMITIVES] = 4,
+ [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+ [FE_OPCODE_DRAW_INSTANCED] = 4,
+ [FE_OPCODE_NOP] = 2,
+ [FE_OPCODE_STALL] = 2,
+};
+
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
+ unsigned int size,
+ struct drm_etnaviv_gem_submit_reloc *relocs,
+ unsigned int reloc_size)
+{
+ struct etna_validation_state state;
+ u32 *buf = stream;
+ u32 *end = buf + size;
+
+ state.gpu = gpu;
+ state.relocs = relocs;
+ state.num_relocs = reloc_size;
+ state.start = stream;
+
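+	/*
+	 * Walk the stream one command at a time; the opcode lives in the
+	 * top five bits of the first word of each command.
+	 */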
+ while (buf < end) {
+ u32 cmd = *buf;
+ unsigned int len, n, off;
+ unsigned int op = cmd >> 27;
+
+ switch (op) {
+ case FE_OPCODE_LOAD_STATE:
+ n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
+ len = ALIGN(1 + n, 2);
+ if (buf + len > end)
+ break;
+
+ off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
+ if (!etnaviv_validate_load_state(&state, buf + 1,
+ off, n))
+ return false;
+ break;
+
+ case FE_OPCODE_DRAW_2D:
+ n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
+ if (n == 0)
+ n = 256;
+ len = 2 + n * 2;
+ break;
+
+ default:
+ len = cmd_length[op];
+ if (len == 0) {
+ dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
+ __func__, op, buf - state.start);
+ return false;
+ }
+ break;
+ }
+
+ buf += len;
+ }
+
+ if (buf > end) {
+ dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
+ __func__, buf - state.start, size);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
new file mode 100644
index 000000000..9dc20d892
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018 Etnaviv Project
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <drm/drm_mm.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
+
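+/*
+ * All command buffers are suballocated from a single 512 KiB DMA
+ * coherent buffer in 4 KiB granules, so only one mapping per MMU
+ * context is needed to cover all of them.
+ */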
+#define SUBALLOC_SIZE SZ_512K
+#define SUBALLOC_GRANULE SZ_4K
+#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
+
+struct etnaviv_cmdbuf_suballoc {
+ /* suballocated dma buffer properties */
+ struct device *dev;
+ void *vaddr;
+ dma_addr_t paddr;
+
+ /* allocation management */
+ struct mutex lock;
+ DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
+ int free_space;
+ wait_queue_head_t free_event;
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct device *dev)
+{
+ struct etnaviv_cmdbuf_suballoc *suballoc;
+ int ret;
+
+ suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
+ if (!suballoc)
+ return ERR_PTR(-ENOMEM);
+
+ suballoc->dev = dev;
+ mutex_init(&suballoc->lock);
+ init_waitqueue_head(&suballoc->free_event);
+
+ BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);
+ suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
+ &suballoc->paddr, GFP_KERNEL);
+ if (!suballoc->vaddr) {
+ ret = -ENOMEM;
+ goto free_suballoc;
+ }
+
+ return suballoc;
+
+free_suballoc:
+ kfree(suballoc);
+
+ return ERR_PTR(ret);
+}
+
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base)
+{
+ return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
+ suballoc->paddr, SUBALLOC_SIZE);
+}
+
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ etnaviv_iommu_put_suballoc_va(context, mapping);
+}
+
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+ dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
+ suballoc->paddr);
+ kfree(suballoc);
+}
+
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size)
+{
+ int granule_offs, order, ret;
+
+ cmdbuf->suballoc = suballoc;
+ cmdbuf->size = size;
+
+ order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
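+	/*
+	 * Allocations are power-of-two runs of granules. If no free run is
+	 * available, wait for another command buffer to be freed and retry.
+	 */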
+retry:
+ mutex_lock(&suballoc->lock);
+ granule_offs = bitmap_find_free_region(suballoc->granule_map,
+ SUBALLOC_GRANULES, order);
+ if (granule_offs < 0) {
+ suballoc->free_space = 0;
+ mutex_unlock(&suballoc->lock);
+ ret = wait_event_interruptible_timeout(suballoc->free_event,
+ suballoc->free_space,
+ msecs_to_jiffies(10 * 1000));
+ if (!ret) {
+ dev_err(suballoc->dev,
+ "Timeout waiting for cmdbuf space\n");
+ return -ETIMEDOUT;
+ }
+ goto retry;
+ }
+ mutex_unlock(&suballoc->lock);
+ cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
+ cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
+
+ return 0;
+}
+
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
+{
+ struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
+ int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
+ SUBALLOC_GRANULE);
+
+ mutex_lock(&suballoc->lock);
+ bitmap_release_region(suballoc->granule_map,
+ cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
+ order);
+ suballoc->free_space = 1;
+ mutex_unlock(&suballoc->lock);
+ wake_up_all(&suballoc->free_event);
+}
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping)
+{
+ return mapping->iova + buf->suballoc_offset;
+}
+
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
+{
+ return buf->suballoc->paddr + buf->suballoc_offset;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
new file mode 100644
index 000000000..ad6fd8eb0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_CMDBUF_H__
+#define __ETNAVIV_CMDBUF_H__
+
+#include <linux/types.h>
+
+struct device;
+struct etnaviv_iommu_context;
+struct etnaviv_vram_mapping;
+struct etnaviv_cmdbuf_suballoc;
+struct etnaviv_perfmon_request;
+
+struct etnaviv_cmdbuf {
+ /* suballocator this cmdbuf is allocated from */
+ struct etnaviv_cmdbuf_suballoc *suballoc;
+ /* cmdbuf properties */
+ int suballoc_offset;
+ void *vaddr;
+ u32 size;
+ u32 user_size;
+};
+
+struct etnaviv_cmdbuf_suballoc *
+etnaviv_cmdbuf_suballoc_new(struct device *dev);
+void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base);
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping);
+
+
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size);
+void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
+
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping);
+dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
+
+#endif /* __ETNAVIV_CMDBUF_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
new file mode 100644
index 000000000..1d2b4fb4b
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_of.h>
+#include <drm/drm_prime.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
+
+/*
+ * DRM operations:
+ */
+
+
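+/*
+ * Initialize all bound GPU cores; a core that fails to initialize is
+ * dropped from the pipe array and ignored from then on.
+ */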
+static void load_gpu(struct drm_device *dev)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *g = priv->gpu[i];
+
+ if (g) {
+ int ret;
+
+ ret = etnaviv_gpu_init(g);
+ if (ret)
+ priv->gpu[i] = NULL;
+ }
+ }
+}
+
+static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_file_private *ctx;
+ int ret, i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
+ priv->cmdbuf_suballoc);
+ if (!ctx->mmu) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+ struct drm_gpu_scheduler *sched;
+
+ if (gpu) {
+ sched = &gpu->sched;
+ drm_sched_entity_init(&ctx->sched_entity[i],
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
+ }
+ }
+
+ file->driver_priv = ctx;
+
+ return 0;
+
+out_free:
+ kfree(ctx);
+ return ret;
+}
+
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_file_private *ctx = file->driver_priv;
+ unsigned int i;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+
+ if (gpu)
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
+ }
+
+ etnaviv_iommu_context_put(ctx->mmu);
+
+ kfree(ctx);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+
+ etnaviv_gem_describe_objects(priv, m);
+
+ return 0;
+}
+
+static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ read_lock(&dev->vma_offset_manager->vm_lock);
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
+ read_unlock(&dev->vma_offset_manager->vm_lock);
+
+ return 0;
+}
+
+static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct etnaviv_iommu_context *mmu_context;
+
+ seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
+
+ /*
+	 * Lock the GPU to avoid an MMU context switch just now and elevate
+ * the refcount of the current context to avoid it disappearing from
+ * under our feet.
+ */
+ mutex_lock(&gpu->lock);
+ mmu_context = gpu->mmu_context;
+ if (mmu_context)
+ etnaviv_iommu_context_get(mmu_context);
+ mutex_unlock(&gpu->lock);
+
+ if (!mmu_context)
+ return 0;
+
+ mutex_lock(&mmu_context->lock);
+ drm_mm_print(&mmu_context->mm, &p);
+ mutex_unlock(&mmu_context->lock);
+
+ etnaviv_iommu_context_put(mmu_context);
+
+ return 0;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ struct etnaviv_cmdbuf *buf = &gpu->buffer;
+ u32 size = buf->size;
+ u32 *ptr = buf->vaddr;
+ u32 i;
+
+ seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
+ buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
+ size - buf->user_size);
+
+ for (i = 0; i < size / 4; i++) {
+ if (i && !(i % 4))
+ seq_puts(m, "\n");
+ if (i % 4 == 0)
+ seq_printf(m, "\t0x%p: ", ptr + i);
+ seq_printf(m, "%08x ", *(ptr + i));
+ }
+ seq_puts(m, "\n");
+}
+
+static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
+
+ mutex_lock(&gpu->lock);
+ etnaviv_buffer_dump(gpu, m);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
+static int show_each_gpu(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_gpu *gpu;
+ int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
+ node->info_ent->data;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ gpu = priv->gpu[i];
+ if (!gpu)
+ continue;
+
+ ret = show(gpu, m);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static struct drm_info_list etnaviv_debugfs_list[] = {
+ {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
+ {"gem", show_unlocked, 0, etnaviv_gem_show},
+	{"mm", show_unlocked, 0, etnaviv_mm_show},
+ {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
+ {"ring", show_each_gpu, 0, etnaviv_ring_show},
+};
+
+static void etnaviv_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
+
+/*
+ * DRM ioctls:
+ */
+
+static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_param *args = data;
+ struct etnaviv_gpu *gpu;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_gpu_get_param(gpu, args->param, &args->value);
+}
+
+static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_new *args = data;
+
+ if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
+ ETNA_BO_FORCE_MMU))
+ return -EINVAL;
+
+ return etnaviv_gem_new_handle(dev, file, args->size,
+ args->flags, &args->handle);
+}
+
+static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_cpu_fini(obj);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = etnaviv_gem_mmap_offset(obj, &args->offset);
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_wait_fence *args = data;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
+ struct etnaviv_gpu *gpu;
+
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ if (args->flags & ETNA_WAIT_NONBLOCK)
+ timeout = NULL;
+
+ return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
+ timeout);
+}
+
+static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_etnaviv_gem_userptr *args = data;
+
+ if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
+ args->flags == 0)
+ return -EINVAL;
+
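+	/*
+	 * Pointer and size must be page aligned and must not be truncated
+	 * when stored in the kernel's native types.
+	 */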
+ if (offset_in_page(args->user_ptr | args->user_size) ||
+ (uintptr_t)args->user_ptr != args->user_ptr ||
+ (u32)args->user_size != args->user_size ||
+ args->user_ptr & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (!access_ok((void __user *)(unsigned long)args->user_ptr,
+ args->user_size))
+ return -EFAULT;
+
+ return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
+ args->user_size, args->flags,
+ &args->handle);
+}
+
+static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_gem_wait *args = data;
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
+ struct drm_gem_object *obj;
+ struct etnaviv_gpu *gpu;
+ int ret;
+
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->flags & ETNA_WAIT_NONBLOCK)
+ timeout = NULL;
+
+ ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_domain *args = data;
+ struct etnaviv_gpu *gpu;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_dom(gpu, args);
+}
+
+static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_signal *args = data;
+ struct etnaviv_gpu *gpu;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_sig(gpu, args);
+}
+
+static const struct drm_ioctl_desc etnaviv_ioctls[] = {
+#define ETNA_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
+};
+
+DEFINE_DRM_GEM_FOPS(fops);
+
+static const struct drm_driver etnaviv_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
+ .open = etnaviv_open,
+ .postclose = etnaviv_postclose,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = etnaviv_debugfs_init,
+#endif
+ .ioctls = etnaviv_ioctls,
+ .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
+ .fops = &fops,
+ .name = "etnaviv",
+ .desc = "etnaviv DRM",
+ .date = "20151214",
+ .major = 1,
+ .minor = 3,
+};
+
+/*
+ * Platform driver:
+ */
+static int etnaviv_bind(struct device *dev)
+{
+ struct etnaviv_drm_private *priv;
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "failed to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ drm->dev_private = priv;
+
+ dma_set_max_seg_size(dev, SZ_2G);
+
+ mutex_init(&priv->gem_lock);
+ INIT_LIST_HEAD(&priv->gem_list);
+ priv->num_gpus = 0;
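+	/*
+	 * Shmem backed objects may use highmem pages, but should fail the
+	 * allocation instead of invoking the OOM killer.
+	 */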
+ priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+ priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
+ if (IS_ERR(priv->cmdbuf_suballoc)) {
+ dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
+ ret = PTR_ERR(priv->cmdbuf_suballoc);
+ goto out_free_priv;
+ }
+
+ dev_set_drvdata(dev, drm);
+
+ ret = component_bind_all(dev, drm);
+ if (ret < 0)
+ goto out_destroy_suballoc;
+
+ load_gpu(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto out_unbind;
+
+ return 0;
+
+out_unbind:
+ component_unbind_all(dev, drm);
+out_destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+out_free_priv:
+ kfree(priv);
+out_put:
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static void etnaviv_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct etnaviv_drm_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+
+ component_unbind_all(dev, drm);
+
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+
+ drm->dev_private = NULL;
+ kfree(priv);
+
+ drm_dev_put(drm);
+}
+
+static const struct component_master_ops etnaviv_master_ops = {
+ .bind = etnaviv_bind,
+ .unbind = etnaviv_unbind,
+};
+
+static int etnaviv_pdev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *first_node = NULL;
+ struct component_match *match = NULL;
+
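+	/*
+	 * Without platform data, bind against every available "vivante,gc"
+	 * node in the DT; otherwise match the device names that were passed
+	 * in as platform data.
+	 */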
+ if (!dev->platform_data) {
+ struct device_node *core_node;
+
+ for_each_compatible_node(core_node, NULL, "vivante,gc") {
+ if (!of_device_is_available(core_node))
+ continue;
+
+ if (!first_node)
+ first_node = core_node;
+
+ drm_of_component_match_add(&pdev->dev, &match,
+ component_compare_of, core_node);
+ }
+ } else {
+ char **names = dev->platform_data;
+ unsigned i;
+
+ for (i = 0; names[i]; i++)
+ component_match_add(dev, &match, component_compare_dev_name, names[i]);
+ }
+
+ /*
+ * PTA and MTLB can have 40 bit base addresses, but
+ * unfortunately, an entry in the MTLB can only point to a
+	 * 32 bit base address of an STLB. Moreover, to initialize the
+	 * MMU we need a command buffer with a 32 bit address because
+	 * without an MMU there is only an identity mapping between
+ * the internal 32 bit addresses and the bus addresses.
+ *
+ * To make things easy, we set the dma_coherent_mask to 32
+ * bit to make sure we are allocating the command buffers and
+ * TLBs in the lower 4 GiB address space.
+ */
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_dbg(&pdev->dev, "No suitable DMA available\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ if (first_node)
+ of_dma_configure(&pdev->dev, first_node, true);
+
+ return component_master_add_with_match(dev, &etnaviv_master_ops, match);
+}
+
+static int etnaviv_pdev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &etnaviv_master_ops);
+
+ return 0;
+}
+
+static struct platform_driver etnaviv_platform_driver = {
+ .probe = etnaviv_pdev_probe,
+ .remove = etnaviv_pdev_remove,
+ .driver = {
+ .name = "etnaviv",
+ },
+};
+
+static struct platform_device *etnaviv_drm;
+
+static int __init etnaviv_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+ struct device_node *np;
+
+ etnaviv_validate_init();
+
+ ret = platform_driver_register(&etnaviv_gpu_driver);
+ if (ret != 0)
+ return ret;
+
+ ret = platform_driver_register(&etnaviv_platform_driver);
+ if (ret != 0)
+ goto unregister_gpu_driver;
+
+ /*
+ * If the DT contains at least one available GPU device, instantiate
+ * the DRM platform device.
+ */
+ for_each_compatible_node(np, NULL, "vivante,gc") {
+ if (!of_device_is_available(np))
+ continue;
+
+ pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
+ if (!pdev) {
+ ret = -ENOMEM;
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+
+ etnaviv_drm = pdev;
+ of_node_put(np);
+ break;
+ }
+
+ return 0;
+
+unregister_platform_driver:
+ platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+ platform_driver_unregister(&etnaviv_gpu_driver);
+ return ret;
+}
+module_init(etnaviv_init);
+
+static void __exit etnaviv_exit(void)
+{
+ platform_device_unregister(etnaviv_drm);
+ platform_driver_unregister(&etnaviv_platform_driver);
+ platform_driver_unregister(&etnaviv_gpu_driver);
+}
+module_exit(etnaviv_exit);
+
+MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
+MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
+MODULE_DESCRIPTION("etnaviv DRM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:etnaviv");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
new file mode 100644
index 000000000..f32f4771d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_DRV_H__
+#define __ETNAVIV_DRV_H__
+
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/etnaviv_drm.h>
+#include <drm/gpu_scheduler.h>
+
+struct etnaviv_cmdbuf;
+struct etnaviv_gpu;
+struct etnaviv_mmu;
+struct etnaviv_gem_object;
+struct etnaviv_gem_submit;
+struct etnaviv_iommu_global;
+
+#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
+
+struct etnaviv_file_private {
+ struct etnaviv_iommu_context *mmu;
+ struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
+};
+
+struct etnaviv_drm_private {
+ int num_gpus;
+ struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+ gfp_t shm_gfp_mask;
+
+ struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+ struct etnaviv_iommu_global *mmu_global;
+
+ /* list of GEM objects: */
+ struct mutex gem_lock;
+ struct list_head gem_list;
+};
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
+void *etnaviv_gem_vmap(struct drm_gem_object *obj);
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+ struct drm_etnaviv_timespec *timeout);
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
+void etnaviv_gem_free_object(struct drm_gem_object *obj);
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle);
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle);
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ struct etnaviv_iommu_context *mmu,
+ unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
+void etnaviv_validate_init(void);
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
+ u32 *stream, unsigned int size,
+ struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);
+
+#ifdef CONFIG_DEBUG_FS
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+ struct seq_file *m);
+#endif
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+/*
+ * Return the storage size of a structure with a variable length array.
+ * The array is nelem elements of elem_size, where the base structure
+ * is defined by base. If the size overflows size_t, return zero.
+ */
+static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
+{
+ if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
+ return 0;
+ return base + nelem * elem_size;
+}
+
+/*
+ * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
+ * We need to calculate the timeout in terms of number of jiffies
+ * between the specified timeout and the current CLOCK_MONOTONIC time.
+ */
+static inline unsigned long etnaviv_timeout_to_jiffies(
+ const struct drm_etnaviv_timespec *timeout)
+{
+ struct timespec64 ts, to = {
+ .tv_sec = timeout->tv_sec,
+ .tv_nsec = timeout->tv_nsec,
+ };
+
+ ktime_get_ts64(&ts);
+
+ /* timeouts before "now" have already expired */
+ if (timespec64_compare(&to, &ts) <= 0)
+ return 0;
+
+ ts = timespec64_sub(to, ts);
+
+ return timespec64_to_jiffies(&ts);
+}
+
+#endif /* __ETNAVIV_DRV_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
new file mode 100644
index 000000000..0edcf8ceb
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/moduleparam.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_sched.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
+struct core_dump_iterator {
+ void *start;
+ struct etnaviv_dump_object_header *hdr;
+ void *data;
+};
+
+static const unsigned short etnaviv_dump_registers[] = {
+ VIVS_HI_AXI_STATUS,
+ VIVS_HI_CLOCK_CONTROL,
+ VIVS_HI_IDLE_STATE,
+ VIVS_HI_AXI_CONFIG,
+ VIVS_HI_INTR_ENBL,
+ VIVS_HI_CHIP_IDENTITY,
+ VIVS_HI_CHIP_FEATURE,
+ VIVS_HI_CHIP_MODEL,
+ VIVS_HI_CHIP_REV,
+ VIVS_HI_CHIP_DATE,
+ VIVS_HI_CHIP_TIME,
+ VIVS_HI_CHIP_MINOR_FEATURE_0,
+ VIVS_HI_CACHE_CONTROL,
+ VIVS_HI_AXI_CONTROL,
+ VIVS_PM_POWER_CONTROLS,
+ VIVS_PM_MODULE_CONTROLS,
+ VIVS_PM_MODULE_STATUS,
+ VIVS_PM_PULSE_EATER,
+ VIVS_MC_MMU_FE_PAGE_TABLE,
+ VIVS_MC_MMU_TX_PAGE_TABLE,
+ VIVS_MC_MMU_PE_PAGE_TABLE,
+ VIVS_MC_MMU_PEZ_PAGE_TABLE,
+ VIVS_MC_MMU_RA_PAGE_TABLE,
+ VIVS_MC_DEBUG_MEMORY,
+ VIVS_MC_MEMORY_BASE_ADDR_RA,
+ VIVS_MC_MEMORY_BASE_ADDR_FE,
+ VIVS_MC_MEMORY_BASE_ADDR_TX,
+ VIVS_MC_MEMORY_BASE_ADDR_PEZ,
+ VIVS_MC_MEMORY_BASE_ADDR_PE,
+ VIVS_MC_MEMORY_TIMING_CONTROL,
+ VIVS_MC_BUS_CONFIG,
+ VIVS_FE_DMA_STATUS,
+ VIVS_FE_DMA_DEBUG_STATE,
+ VIVS_FE_DMA_ADDRESS,
+ VIVS_FE_DMA_LOW,
+ VIVS_FE_DMA_HIGH,
+ VIVS_FE_AUTO_FLUSH,
+};
+
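+/*
+ * Fill in the next object header and advance the iterator past the
+ * payload the caller has just written at iter->data.
+ */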
+static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
+ u32 type, void *data_end)
+{
+ struct etnaviv_dump_object_header *hdr = iter->hdr;
+
+ hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
+ hdr->type = cpu_to_le32(type);
+ hdr->file_offset = cpu_to_le32(iter->data - iter->start);
+ hdr->file_size = cpu_to_le32(data_end - iter->data);
+
+ iter->hdr++;
+ iter->data += le32_to_cpu(hdr->file_size);
+}
+
+static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
+ struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_dump_registers *reg = iter->data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
+ reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
+ reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i]));
+ }
+
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
+}
+
+static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
+ struct etnaviv_iommu_context *mmu, size_t mmu_size)
+{
+ etnaviv_iommu_dump(mmu, iter->data);
+
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
+}
+
+static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
+ void *ptr, size_t size, u64 iova)
+{
+ memcpy(iter->data, ptr, size);
+
+ iter->hdr->iova = cpu_to_le64(iova);
+
+ etnaviv_core_dump_header(iter, type, iter->data + size);
+}
+
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+{
+ struct etnaviv_gpu *gpu = submit->gpu;
+ struct core_dump_iterator iter;
+ struct etnaviv_gem_object *obj;
+ unsigned int n_obj, n_bomap_pages;
+ size_t file_size, mmu_size;
+ __le64 *bomap, *bomap_start;
+ int i;
+
+ /* Only catch the first event, or when manually re-armed */
+ if (!etnaviv_dump_core)
+ return;
+ etnaviv_dump_core = false;
+
+ mutex_lock(&submit->mmu_context->lock);
+
+ mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
+
+ /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ n_obj = 5;
+ n_bomap_pages = 0;
+ file_size = ARRAY_SIZE(etnaviv_dump_registers) *
+ sizeof(struct etnaviv_dump_registers) +
+ mmu_size + gpu->buffer.size + submit->cmdbuf.size;
+
+ /* Add in the active buffer objects */
+ for (i = 0; i < submit->nr_bos; i++) {
+ obj = submit->bos[i].obj;
+ file_size += obj->base.size;
+ n_bomap_pages += obj->base.size >> PAGE_SHIFT;
+ n_obj++;
+ }
+
+ /* If we have any buffer objects, add a bomap object */
+ if (n_bomap_pages) {
+ file_size += n_bomap_pages * sizeof(__le64);
+ n_obj++;
+ }
+
+ /* Add the size of the headers */
+ file_size += sizeof(*iter.hdr) * n_obj;
+
+ /* Allocate the file in vmalloc memory, it's likely to be big */
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!iter.start) {
+ mutex_unlock(&submit->mmu_context->lock);
+ dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+ return;
+ }
+
+ /* Point the data member after the headers */
+ iter.hdr = iter.start;
+ iter.data = &iter.hdr[n_obj];
+
+ memset(iter.hdr, 0, iter.data - iter.start);
+
+ etnaviv_core_dump_registers(&iter, gpu);
+ etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ gpu->buffer.size,
+ etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &submit->mmu_context->cmdbuf_mapping));
+
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf,
+ &submit->mmu_context->cmdbuf_mapping));
+
+ mutex_unlock(&submit->mmu_context->lock);
+
+ /* Reserve space for the bomap */
+ if (n_bomap_pages) {
+ bomap_start = bomap = iter.data;
+ memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
+ bomap + n_bomap_pages);
+ } else {
+ /* Silence warning */
+ bomap_start = bomap = NULL;
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_vram_mapping *vram;
+ struct page **pages;
+ void *vaddr;
+
+ obj = submit->bos[i].obj;
+ vram = submit->bos[i].mapping;
+
+ mutex_lock(&obj->lock);
+ pages = etnaviv_gem_get_pages(obj);
+ mutex_unlock(&obj->lock);
+ if (!IS_ERR(pages)) {
+ int j;
+
+ iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
+
+ for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
+ *bomap++ = cpu_to_le64(page_to_phys(*pages++));
+ }
+
+ iter.hdr->iova = cpu_to_le64(vram->iova);
+
+ vaddr = etnaviv_gem_vmap(&obj->base);
+ if (vaddr)
+ memcpy(iter.data, vaddr, obj->base.size);
+
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
+ obj->base.size);
+ }
+
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
+
+ dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
new file mode 100644
index 000000000..a125c46b8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ */
+
+#ifndef ETNAVIV_DUMP_H
+#define ETNAVIV_DUMP_H
+
+#include <linux/types.h>
+
+enum {
+ ETDUMP_MAGIC = 0x414e5445,
+ ETDUMP_BUF_REG = 0,
+ ETDUMP_BUF_MMU,
+ ETDUMP_BUF_RING,
+ ETDUMP_BUF_CMD,
+ ETDUMP_BUF_BOMAP,
+ ETDUMP_BUF_BO,
+ ETDUMP_BUF_END,
+};
+
+struct etnaviv_dump_object_header {
+ __le32 magic;
+ __le32 type;
+ __le32 file_offset;
+ __le32 file_size;
+ __le64 iova;
+ __le32 data[2];
+};
+
+/* Registers object, an array of these */
+struct etnaviv_dump_registers {
+ __le32 reg;
+ __le32 value;
+};
+
+#ifdef __KERNEL__
+struct etnaviv_gem_submit;
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
new file mode 100644
index 000000000..5cf13e52f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <drm/drm_prime.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static struct lock_class_key etnaviv_shm_lock_class;
+static struct lock_class_key etnaviv_userptr_lock_class;
+
+static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct sg_table *sgt = etnaviv_obj->sgt;
+
+ /*
+ * For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent.
+ */
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+ dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+}
+
+static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct sg_table *sgt = etnaviv_obj->sgt;
+
+ /*
+ * For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ *
+ * WARNING: The DMA API does not support concurrent CPU
+ * and device access to the memory area. With BIDIRECTIONAL,
+ * we will clean the cache lines which overlap the region,
+ * and invalidate all cache lines (partially) contained in
+ * the region.
+ *
+ * If you have dirty data in the overlapping cache lines,
+ * that will corrupt the GPU-written data. If you have
+ * written into the remainder of the region, this can
+ * discard those writes.
+ */
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
+ dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+}
+
+/* called with etnaviv_obj->lock held */
+static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
+
+ if (IS_ERR(p)) {
+ dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ return PTR_ERR(p);
+ }
+
+ etnaviv_obj->pages = p;
+
+ return 0;
+}
+
+static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->sgt) {
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+ sg_free_table(etnaviv_obj->sgt);
+ kfree(etnaviv_obj->sgt);
+ etnaviv_obj->sgt = NULL;
+ }
+ if (etnaviv_obj->pages) {
+ drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
+ true, false);
+
+ etnaviv_obj->pages = NULL;
+ }
+}
+
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ if (!etnaviv_obj->pages) {
+ ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ if (!etnaviv_obj->sgt) {
+ struct drm_device *dev = etnaviv_obj->base.dev;
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt;
+
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+ etnaviv_obj->pages, npages);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev, "failed to allocate sgt: %ld\n",
+ PTR_ERR(sgt));
+ return ERR_CAST(sgt);
+ }
+
+ etnaviv_obj->sgt = sgt;
+
+ etnaviv_gem_scatter_map(etnaviv_obj);
+ }
+
+ return etnaviv_obj->pages;
+}
+
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ lockdep_assert_held(&etnaviv_obj->lock);
+ /* when we start tracking the pin count, then do something here */
+}
+
+static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ struct vm_area_struct *vma)
+{
+ pgprot_t vm_page_prot;
+
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ if (etnaviv_obj->flags & ETNA_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
+ } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_page_prot);
+ } else {
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ vma->vm_pgoff = 0;
+ vma_set_file(vma, etnaviv_obj->base.filp);
+
+ vma->vm_page_prot = vm_page_prot;
+ }
+
+ return 0;
+}
+
+static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
+}
+
+static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct page **pages, *page;
+ pgoff_t pgoff;
+ int err;
+
+ /*
+ * Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet. Note that vmf_insert_page() is
+ * specifically coded to take care of this, so we don't have to.
+ */
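+	/*
+	 * A pending signal while waiting for the object lock just makes the
+	 * fault return without a page; it will be retried once the signal
+	 * has been handled.
+	 */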
+ err = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (err)
+ return VM_FAULT_NOPAGE;
+ /* make sure we have pages attached now */
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+
+ if (IS_ERR(pages)) {
+ err = PTR_ERR(pages);
+ return vmf_error(err);
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ page = pages[pgoff];
+
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
+ page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
+
+ return vmf_insert_page(vma, vmf->address, page);
+}
+
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
+{
+ int ret;
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ dev_err(obj->dev->dev, "could not allocate mmap offset\n");
+ else
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ return ret;
+}
+
+static struct etnaviv_vram_mapping *
+etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
+ struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_vram_mapping *mapping;
+
+ list_for_each_entry(mapping, &obj->vram_list, obj_node) {
+ if (mapping->context == context)
+ return mapping;
+ }
+
+ return NULL;
+}
+
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+ mutex_lock(&etnaviv_obj->lock);
+ WARN_ON(mapping->use == 0);
+ mapping->use -= 1;
+ mutex_unlock(&etnaviv_obj->lock);
+
+ drm_gem_object_put(&etnaviv_obj->base);
+}
+
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_vram_mapping *mapping;
+ struct page **pages;
+ int ret = 0;
+
+ mutex_lock(&etnaviv_obj->lock);
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
+ if (mapping) {
+ /*
+ * Holding the object lock prevents the use count changing
+ * beneath us. If the use count is zero, the MMU might be
+ * reaping this object, so take the lock and re-check that
+ * the MMU owns this mapping to close this race.
+ */
+ if (mapping->use == 0) {
+ mutex_lock(&mmu_context->lock);
+			if (mapping->context == mmu_context) {
+				if (va && mapping->iova != va) {
+					etnaviv_iommu_reap_mapping(mapping);
+					mapping = NULL;
+				} else {
+					mapping->use += 1;
+				}
+			} else {
+				mapping = NULL;
+			}
+ mutex_unlock(&mmu_context->lock);
+ if (mapping)
+ goto out;
+ } else {
+ mapping->use += 1;
+ goto out;
+ }
+ }
+
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ /*
+ * See if we have a reaped vram mapping we can re-use before
+ * allocating a fresh mapping.
+ */
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
+ if (!mapping) {
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mapping->scan_node);
+ mapping->object = etnaviv_obj;
+ } else {
+ list_del(&mapping->obj_node);
+ }
+
+ mapping->use = 1;
+
+ ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+ mmu_context->global->memory_base,
+ mapping, va);
+ if (ret < 0)
+ kfree(mapping);
+ else
+ list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+
+out:
+ mutex_unlock(&etnaviv_obj->lock);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Take a reference on the object */
+ drm_gem_object_get(obj);
+ return mapping;
+}
+
+void *etnaviv_gem_vmap(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ if (etnaviv_obj->vaddr)
+ return etnaviv_obj->vaddr;
+
+ mutex_lock(&etnaviv_obj->lock);
+ /*
+ * Need to check again, as we might have raced with another thread
+ * while waiting for the mutex.
+ */
+ if (!etnaviv_obj->vaddr)
+ etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+
+ return etnaviv_obj->vaddr;
+}
+
+static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+{
+ struct page **pages;
+
+ lockdep_assert_held(&obj->lock);
+
+ pages = etnaviv_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return NULL;
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
+static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+{
+ if (op & ETNA_PREP_READ)
+ return DMA_FROM_DEVICE;
+ else if (op & ETNA_PREP_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_BIDIRECTIONAL;
+}
+
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+ struct drm_etnaviv_timespec *timeout)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct drm_device *dev = obj->dev;
+ bool write = !!(op & ETNA_PREP_WRITE);
+ int ret;
+
+ if (!etnaviv_obj->sgt) {
+ void *ret;
+
+ mutex_lock(&etnaviv_obj->lock);
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
+ if (op & ETNA_PREP_NOSYNC) {
+ if (!dma_resv_test_signaled(obj->resv,
+ dma_resv_usage_rw(write)))
+ return -EBUSY;
+ } else {
+ unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
+ if (ret <= 0)
+ return ret == 0 ? -ETIMEDOUT : ret;
+ }
+
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+ dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+ etnaviv_op_to_dma_dir(op));
+ etnaviv_obj->last_cpu_prep_op = op;
+ }
+
+ return 0;
+}
+
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
+ /* fini without a prep is almost certainly a userspace error */
+ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
+ dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
+ etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
+ etnaviv_obj->last_cpu_prep_op = 0;
+ }
+
+ return 0;
+}
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ struct drm_etnaviv_timespec *timeout)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct dma_resv *robj = obj->resv;
+ unsigned long off = drm_vma_node_start(&obj->vma_node);
+ int r;
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
+ etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
+ obj->name, kref_read(&obj->refcount),
+ off, etnaviv_obj->vaddr, obj->size);
+
+ r = dma_resv_lock(robj, NULL);
+ if (r)
+ return;
+
+ dma_resv_describe(robj, m);
+ dma_resv_unlock(robj);
+}
+
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
+ struct seq_file *m)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ int count = 0;
+ size_t size = 0;
+
+ mutex_lock(&priv->gem_lock);
+ list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
+ struct drm_gem_object *obj = &etnaviv_obj->base;
+
+ seq_puts(m, " ");
+ etnaviv_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+ mutex_unlock(&priv->gem_lock);
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ vunmap(etnaviv_obj->vaddr);
+ put_pages(etnaviv_obj);
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
+ .get_pages = etnaviv_gem_shmem_get_pages,
+ .release = etnaviv_gem_shmem_release,
+ .vmap = etnaviv_gem_vmap_impl,
+ .mmap = etnaviv_gem_mmap_obj,
+};
+
+void etnaviv_gem_free_object(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_drm_private *priv = obj->dev->dev_private;
+ struct etnaviv_vram_mapping *mapping, *tmp;
+
+ /* object should not be active */
+ WARN_ON(is_active(etnaviv_obj));
+
+ mutex_lock(&priv->gem_lock);
+ list_del(&etnaviv_obj->gem_node);
+ mutex_unlock(&priv->gem_lock);
+
+ list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
+ obj_node) {
+ struct etnaviv_iommu_context *context = mapping->context;
+
+ WARN_ON(mapping->use);
+
+ if (context)
+ etnaviv_iommu_unmap_gem(context, mapping);
+
+ list_del(&mapping->obj_node);
+ kfree(mapping);
+ }
+
+ drm_gem_free_mmap_offset(obj);
+ etnaviv_obj->ops->release(etnaviv_obj);
+ drm_gem_object_release(obj);
+
+ kfree(etnaviv_obj);
+}
+
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&priv->gem_lock);
+ list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
+ mutex_unlock(&priv->gem_lock);
+}
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = etnaviv_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
+ .free = etnaviv_gem_free_object,
+ .pin = etnaviv_gem_prime_pin,
+ .unpin = etnaviv_gem_prime_unpin,
+ .get_sg_table = etnaviv_gem_prime_get_sg_table,
+ .vmap = etnaviv_gem_prime_vmap,
+ .mmap = etnaviv_gem_mmap,
+ .vm_ops = &vm_ops,
+};
+
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
+ const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ unsigned sz = sizeof(*etnaviv_obj);
+ bool valid = true;
+
+ /* validate flags */
+ switch (flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_UNCACHED:
+ case ETNA_BO_CACHED:
+ case ETNA_BO_WC:
+ break;
+ default:
+ valid = false;
+ }
+
+ if (!valid) {
+ dev_err(dev->dev, "invalid cache flag: %x\n",
+ (flags & ETNA_BO_CACHE_MASK));
+ return -EINVAL;
+ }
+
+ etnaviv_obj = kzalloc(sz, GFP_KERNEL);
+ if (!etnaviv_obj)
+ return -ENOMEM;
+
+ etnaviv_obj->flags = flags;
+ etnaviv_obj->ops = ops;
+
+ mutex_init(&etnaviv_obj->lock);
+ INIT_LIST_HEAD(&etnaviv_obj->vram_list);
+
+ *obj = &etnaviv_obj->base;
+ (*obj)->funcs = &etnaviv_gem_object_funcs;
+
+ return 0;
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_gem_object *obj = NULL;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ ret = etnaviv_gem_new_impl(dev, size, flags,
+ &etnaviv_gem_shmem_ops, &obj);
+ if (ret)
+ goto fail;
+
+ lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+
+ /*
+ * Our buffers are kept pinned, so allocating them from the MOVABLE
+	 * zone is a really bad idea, and conflicts with CMA. See the comments
+	 * above new_inode() for why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
+
+ etnaviv_gem_obj_add(dev, obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+fail:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
+ if (ret)
+ return ret;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ *res = to_etnaviv_bo(obj);
+
+ return 0;
+}
+
+static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct page **pvec = NULL;
+ struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
+ int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+ might_lock_read(&current->mm->mmap_lock);
+
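+	/* the pages may only be pinned from the mm that created the userptr BO */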
+ if (userptr->mm != current->mm)
+ return -EPERM;
+
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pvec)
+ return -ENOMEM;
+
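+	/*
+	 * Pin the range in chunks, as pin_user_pages_fast() may pin fewer
+	 * pages than requested; unwind all pins if any chunk fails.
+	 */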
+ do {
+ unsigned num_pages = npages - pinned;
+ uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
+ struct page **pages = pvec + pinned;
+
+ ret = pin_user_pages_fast(ptr, num_pages,
+ FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
+ pages);
+ if (ret < 0) {
+ unpin_user_pages(pvec, pinned);
+ kvfree(pvec);
+ return ret;
+ }
+
+ pinned += ret;
+
+ } while (pinned < npages);
+
+ etnaviv_obj->pages = pvec;
+
+ return 0;
+}
+
+static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ if (etnaviv_obj->sgt) {
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
+ sg_free_table(etnaviv_obj->sgt);
+ kfree(etnaviv_obj->sgt);
+ }
+ if (etnaviv_obj->pages) {
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+
+ unpin_user_pages(etnaviv_obj->pages, npages);
+ kvfree(etnaviv_obj->pages);
+ }
+}
+
+static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
+ .get_pages = etnaviv_gem_userptr_get_pages,
+ .release = etnaviv_gem_userptr_release,
+ .vmap = etnaviv_gem_vmap_impl,
+ .mmap = etnaviv_gem_userptr_mmap_obj,
+};
+
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ int ret;
+
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
+ &etnaviv_gem_userptr_ops, &etnaviv_obj);
+ if (ret)
+ return ret;
+
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
+
+ etnaviv_obj->userptr.ptr = ptr;
+ etnaviv_obj->userptr.mm = current->mm;
+ etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
+
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+
+ ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&etnaviv_obj->base);
+ return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
new file mode 100644
index 000000000..63688e6e4
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_GEM_H__
+#define __ETNAVIV_GEM_H__
+
+#include <linux/dma-resv.h>
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_drv.h"
+
+struct dma_fence;
+struct etnaviv_gem_ops;
+struct etnaviv_gem_object;
+
+struct etnaviv_gem_userptr {
+ uintptr_t ptr;
+ struct mm_struct *mm;
+ bool ro;
+};
+
+struct etnaviv_vram_mapping {
+ struct list_head obj_node;
+ struct list_head scan_node;
+ struct list_head mmu_node;
+ struct etnaviv_gem_object *object;
+ struct etnaviv_iommu_context *context;
+ struct drm_mm_node vram_node;
+ unsigned int use;
+ u32 iova;
+};
+
+struct etnaviv_gem_object {
+ struct drm_gem_object base;
+ const struct etnaviv_gem_ops *ops;
+ struct mutex lock;
+
+ u32 flags;
+
+ struct list_head gem_node;
+ struct etnaviv_gpu *gpu; /* non-null if active */
+ atomic_t gpu_active;
+ u32 access;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct list_head vram_list;
+
+ /* cache maintenance */
+ u32 last_cpu_prep_op;
+
+ struct etnaviv_gem_userptr userptr;
+};
+
+static inline
+struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct etnaviv_gem_object, base);
+}
+
+struct etnaviv_gem_ops {
+ int (*get_pages)(struct etnaviv_gem_object *);
+ void (*release)(struct etnaviv_gem_object *);
+ void *(*vmap)(struct etnaviv_gem_object *);
+ int (*mmap)(struct etnaviv_gem_object *, struct vm_area_struct *);
+};
+
+static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
+{
+ return atomic_read(&etnaviv_obj->gpu_active) != 0;
+}
+
+#define MAX_CMDS 4
+
+struct etnaviv_gem_submit_bo {
+ u32 flags;
+ u64 va;
+ struct etnaviv_gem_object *obj;
+ struct etnaviv_vram_mapping *mapping;
+};
+
+/* Created per submit ioctl, to track the BOs and cmdstream buffers
+ * associated with the cmdstream submission for synchronization (and
+ * to make it easier to unwind when things go wrong).
+ */
+struct etnaviv_gem_submit {
+ struct drm_sched_job sched_job;
+ struct kref refcount;
+ struct etnaviv_file_private *ctx;
+ struct etnaviv_gpu *gpu;
+ struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
+ struct dma_fence *out_fence;
+ int out_fence_id;
+ struct list_head node; /* GPU active submit list */
+ struct etnaviv_cmdbuf cmdbuf;
+ bool runtime_resumed;
+ u32 exec_state;
+ u32 flags;
+ unsigned int nr_pmrs;
+ struct etnaviv_perfmon_request *pmrs;
+ unsigned int nr_bos;
+ struct etnaviv_gem_submit_bo bos[];
+ /* No new members here, the previous one is variable-length! */
+};
+
+void etnaviv_submit_put(struct etnaviv_gem_submit *submit);
+
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
+ struct drm_etnaviv_timespec *timeout);
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
+
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va);
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
+
+#endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
new file mode 100644
index 000000000..fc594ea5b
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Etnaviv Project
+ */
+
+#include <drm/drm_prime.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+static struct lock_class_key etnaviv_prime_lock_class;
+
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
+ return ERR_PTR(-EINVAL);
+
+ return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
+}
+
+int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ void *vaddr;
+
+ vaddr = etnaviv_gem_vmap(obj);
+ if (!vaddr)
+ return -ENOMEM;
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach) {
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&etnaviv_obj->lock);
+ etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ }
+ return 0;
+}
+
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach) {
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+ mutex_lock(&etnaviv_obj->lock);
+ etnaviv_gem_put_pages(to_etnaviv_bo(obj));
+ mutex_unlock(&etnaviv_obj->lock);
+ }
+}
+
+static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
+
+ if (etnaviv_obj->vaddr)
+ dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+
+ /* Don't drop the pages for imported dmabuf, as they are not
+ * ours, just free the array we allocated:
+ */
+ kvfree(etnaviv_obj->pages);
+
+ drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
+}
+
+static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+{
+ struct iosys_map map;
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ if (ret)
+ return NULL;
+ return map.vaddr;
+}
+
+static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+ if (!ret) {
+ /* Drop the reference acquired by drm_gem_mmap_obj(). */
+ drm_gem_object_put(&etnaviv_obj->base);
+ }
+
+ return ret;
+}
+
+static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
+ /* .get_pages should never be called */
+ .release = etnaviv_gem_prime_release,
+ .vmap = etnaviv_gem_prime_vmap_impl,
+ .mmap = etnaviv_gem_prime_mmap_obj,
+};
+
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sgt)
+{
+ struct etnaviv_gem_object *etnaviv_obj;
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
+ int ret, npages;
+
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
+ &etnaviv_gem_prime_ops, &etnaviv_obj);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);
+
+ npages = size / PAGE_SIZE;
+
+ etnaviv_obj->sgt = sgt;
+ etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!etnaviv_obj->pages) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
+ if (ret)
+ goto fail;
+
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
+
+ return &etnaviv_obj->base;
+
+fail:
+ drm_gem_object_put(&etnaviv_obj->base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 000000000..1ac916b24
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ */
+
+#include <drm/drm_file.h>
+#include <linux/dma-fence-array.h>
+#include <linux/file.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-resv.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
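+/* kernel-internal per-BO state, tracked alongside the userspace flags */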
+#define BO_LOCKED 0x4000
+#define BO_PINNED 0x2000
+
+static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
+ struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
+{
+ struct etnaviv_gem_submit *submit;
+ size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
+
+ submit = kzalloc(sz, GFP_KERNEL);
+ if (!submit)
+ return NULL;
+
+ submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
+ GFP_KERNEL);
+ if (!submit->pmrs) {
+ kfree(submit);
+ return NULL;
+ }
+ submit->nr_pmrs = nr_pmrs;
+
+ submit->gpu = gpu;
+ kref_init(&submit->refcount);
+
+ return submit;
+}
+
+static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
+ struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
+ unsigned nr_bos)
+{
+ struct drm_etnaviv_gem_submit_bo *bo;
+ unsigned i;
+ int ret = 0;
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
+ struct drm_gem_object *obj;
+
+ if (bo->flags & BO_INVALID_FLAGS) {
+ DRM_ERROR("invalid flags: %x\n", bo->flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->bos[i].flags = bo->flags;
+ if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
+ if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
+ DRM_ERROR("invalid softpin address\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ submit->bos[i].va = bo->presumed;
+ }
+
+		/* normally we would use drm_gem_object_lookup(), but for a
+		 * bulk lookup all under a single table_lock we just hit the
+		 * object_idr directly:
+		 */
+ obj = idr_find(&file->object_idr, bo->handle);
+ if (!obj) {
+ DRM_ERROR("invalid handle %u at index %u\n",
+ bo->handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Take a refcount on the object. The file table lock
+ * prevents the object_idr's refcount on this being dropped.
+ */
+ drm_gem_object_get(obj);
+
+ submit->bos[i].obj = to_etnaviv_bo(obj);
+ }
+
+out_unlock:
+ submit->nr_bos = i;
+ spin_unlock(&file->table_lock);
+
+ return ret;
+}
+
+static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
+{
+ if (submit->bos[i].flags & BO_LOCKED) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ dma_resv_unlock(obj->resv);
+ submit->bos[i].flags &= ~BO_LOCKED;
+ }
+}
+
+static int submit_lock_objects(struct etnaviv_gem_submit *submit,
+ struct ww_acquire_ctx *ticket)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
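+	/*
+	 * ww_mutex backoff: if a lock attempt returns -EDEADLK, drop all
+	 * locks taken so far, take the contended lock in the slow path and
+	 * retry the whole sequence with the same acquire ticket.
+	 */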
+retry:
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = dma_resv_lock_interruptible(obj->resv, ticket);
+ if (ret == -EALREADY)
+ DRM_ERROR("BO at index %u already on submit list\n",
+ i);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+ }
+
+ ww_acquire_done(ticket);
+
+ return 0;
+
+fail:
+ for (; i >= 0; i--)
+ submit_unlock_object(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_object(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct drm_gem_object *obj;
+
+ obj = &submit->bos[contended].obj->base;
+
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int submit_fence_sync(struct etnaviv_gem_submit *submit)
+{
+ int i, ret = 0;
+
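+	/*
+	 * Make room for the out-fence on every BO and, unless the submit
+	 * opted out with ETNA_SUBMIT_NO_IMPLICIT, pull in the BOs' current
+	 * fences as scheduler dependencies (implicit synchronization).
+	 */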
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+ struct dma_resv *robj = bo->obj->base.resv;
+
+ ret = dma_resv_reserve_fences(robj, 1);
+ if (ret)
+ return ret;
+
+ if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
+ continue;
+
+ ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
+ &bo->obj->base,
+ bo->flags & ETNA_SUBMIT_BO_WRITE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+
+ dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
+ submit_unlock_object(submit, i);
+ }
+}
+
+static int submit_pin_objects(struct etnaviv_gem_submit *submit)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct etnaviv_vram_mapping *mapping;
+
+ mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
+ submit->mmu_context,
+ submit->bos[i].va);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ break;
+ }
+
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
+ submit->bos[i].va != mapping->iova) {
+ etnaviv_gem_mapping_unreference(mapping);
+ return -EINVAL;
+ }
+
+ atomic_inc(&etnaviv_obj->gpu_active);
+
+ submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].mapping = mapping;
+ }
+
+ return ret;
+}
+
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
+ struct etnaviv_gem_submit_bo **bo)
+{
+ if (idx >= submit->nr_bos) {
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
+ }
+
+ *bo = &submit->bos[idx];
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
+ u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
+ u32 nr_relocs)
+{
+ u32 i, last_offset = 0;
+ u32 *ptr = stream;
+ int ret;
+
+ /* Submits using softpin don't blend with relocs */
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
+ return -EINVAL;
+
+ for (i = 0; i < nr_relocs; i++) {
+ const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
+ struct etnaviv_gem_submit_bo *bo;
+ u32 off;
+
+ if (unlikely(r->flags)) {
+ DRM_ERROR("invalid reloc flags\n");
+ return -EINVAL;
+ }
+
+ if (r->submit_offset % 4) {
+ DRM_ERROR("non-aligned reloc offset: %u\n",
+ r->submit_offset);
+ return -EINVAL;
+ }
+
+ /* offset in dwords: */
+ off = r->submit_offset / 4;
+
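+		/* reloc offsets may not go backwards and must stay inside the stream */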
+		if ((off >= size) || (off < last_offset)) {
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ return -EINVAL;
+ }
+
+ ret = submit_bo(submit, r->reloc_idx, &bo);
+ if (ret)
+ return ret;
+
+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object\n", i);
+ return -EINVAL;
+ }
+
+ ptr[off] = bo->mapping->iova + r->reloc_offset;
+
+ last_offset = off;
+ }
+
+ return 0;
+}
+
+static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
+ u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
+{
+ u32 i;
+
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
+ struct etnaviv_gem_submit_bo *bo;
+ int ret;
+
+ ret = submit_bo(submit, r->read_idx, &bo);
+ if (ret)
+ return ret;
+
+		/* offset 0 is reserved for a sequence number used for userspace sync */
+ if (r->read_offset == 0) {
+ DRM_ERROR("perfmon request: offset is 0");
+ return -EINVAL;
+ }
+
+ if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
+ DRM_ERROR("perfmon request: offset %u outside object", i);
+ return -EINVAL;
+ }
+
+ if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
+ DRM_ERROR("perfmon request: flags are not valid");
+ return -EINVAL;
+ }
+
+ if (etnaviv_pm_req_validate(r, exec_state)) {
+ DRM_ERROR("perfmon request: domain or signal not valid");
+ return -EINVAL;
+ }
+
+ submit->pmrs[i].flags = r->flags;
+ submit->pmrs[i].domain = r->domain;
+ submit->pmrs[i].signal = r->signal;
+ submit->pmrs[i].sequence = r->sequence;
+ submit->pmrs[i].offset = r->read_offset;
+ submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
+ }
+
+ return 0;
+}
+
+static void submit_cleanup(struct kref *kref)
+{
+ struct etnaviv_gem_submit *submit =
+ container_of(kref, struct etnaviv_gem_submit, refcount);
+ unsigned i;
+
+ if (submit->runtime_resumed)
+ pm_runtime_put_autosuspend(submit->gpu->dev);
+
+ if (submit->cmdbuf.suballoc)
+ etnaviv_cmdbuf_free(&submit->cmdbuf);
+
+ if (submit->mmu_context)
+ etnaviv_iommu_context_put(submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ /* unpin all objects */
+ if (submit->bos[i].flags & BO_PINNED) {
+ etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
+ atomic_dec(&etnaviv_obj->gpu_active);
+ submit->bos[i].mapping = NULL;
+ submit->bos[i].flags &= ~BO_PINNED;
+ }
+
+ /* if the GPU submit failed, objects might still be locked */
+ submit_unlock_object(submit, i);
+ drm_gem_object_put(&etnaviv_obj->base);
+ }
+
+ wake_up_all(&submit->gpu->fence_event);
+
+ if (submit->out_fence) {
+ /* first remove from IDR, so fence can not be found anymore */
+ mutex_lock(&submit->gpu->fence_lock);
+ idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
+ mutex_unlock(&submit->gpu->fence_lock);
+ dma_fence_put(submit->out_fence);
+ }
+ kfree(submit->pmrs);
+ kfree(submit);
+}
+
+void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
+{
+ kref_put(&submit->refcount, submit_cleanup);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_file_private *ctx = file->driver_priv;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_gem_submit *args = data;
+ struct drm_etnaviv_gem_submit_reloc *relocs;
+ struct drm_etnaviv_gem_submit_pmr *pmrs;
+ struct drm_etnaviv_gem_submit_bo *bos;
+ struct etnaviv_gem_submit *submit;
+ struct etnaviv_gpu *gpu;
+ struct sync_file *sync_file = NULL;
+ struct ww_acquire_ctx ticket;
+ int out_fence_fd = -1;
+ void *stream;
+ int ret;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ if (args->stream_size % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+ args->stream_size);
+ return -EINVAL;
+ }
+
+ if (args->exec_state != ETNA_PIPE_3D &&
+ args->exec_state != ETNA_PIPE_2D &&
+ args->exec_state != ETNA_PIPE_VG) {
+ DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
+ return -EINVAL;
+ }
+
+ if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+ DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
+ if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
+ priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
+ DRM_ERROR("softpin requested on incompatible MMU\n");
+ return -EINVAL;
+ }
+
+ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the command submission and bo array to kernel space in
+ * one go, and do this outside of any locks.
+ */
+ bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
+ relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+ pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
+ stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
+ if (!bos || !relocs || !pmrs || !stream) {
+ ret = -ENOMEM;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
+ args->nr_bos * sizeof(*bos));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
+ args->nr_relocs * sizeof(*relocs));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
+ args->nr_pmrs * sizeof(*pmrs));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
+ args->stream_size);
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_submit_cmds;
+ }
+ }
+
+ ww_acquire_init(&ticket, &reservation_ww_class);
+
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto err_submit_ww_acquire;
+ }
+
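+	/*
+	 * The cmdbuf holds a copy of the user stream plus 8 bytes of space
+	 * for the LINK command the kernel appends when queueing the buffer.
+	 */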
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
+ ALIGN(args->stream_size, 8) + 8);
+ if (ret)
+ goto err_submit_put;
+
+ submit->ctx = file->driver_priv;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
+ submit->exec_state = args->exec_state;
+ submit->flags = args->flags;
+
+ ret = drm_sched_job_init(&submit->sched_job,
+ &ctx->sched_entity[args->pipe],
+ submit->ctx);
+ if (ret)
+ goto err_submit_put;
+
+ ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
+ if (ret)
+ goto err_submit_job;
+
+ if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
+ !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+ relocs, args->nr_relocs)) {
+ ret = -EINVAL;
+ goto err_submit_job;
+ }
+
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto err_submit_job;
+ }
+
+ ret = drm_sched_job_add_dependency(&submit->sched_job,
+ in_fence);
+ if (ret)
+ goto err_submit_job;
+ }
+
+ ret = submit_pin_objects(submit);
+ if (ret)
+ goto err_submit_job;
+
+ ret = submit_reloc(submit, stream, args->stream_size / 4,
+ relocs, args->nr_relocs);
+ if (ret)
+ goto err_submit_job;
+
+ ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
+ if (ret)
+ goto err_submit_job;
+
+ memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
+
+ ret = submit_lock_objects(submit, &ticket);
+ if (ret)
+ goto err_submit_job;
+
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto err_submit_job;
+
+ ret = etnaviv_sched_push_job(submit);
+ if (ret)
+ goto err_submit_job;
+
+ submit_attach_object_fences(submit);
+
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ /*
+ * This can be improved: ideally we want to allocate the sync
+ * file before kicking off the GPU job and just attach the
+ * fence to the sync file here, eliminating the ENOMEM
+ * possibility at this stage.
+ */
+ sync_file = sync_file_create(submit->out_fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ /*
+ * When this late error is hit, the submit has already
+ * been handed over to the scheduler. At this point
+ * the sched_job must not be cleaned up.
+ */
+ goto err_submit_put;
+ }
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
+ args->fence_fd = out_fence_fd;
+ args->fence = submit->out_fence_id;
+
+err_submit_job:
+ if (ret)
+ drm_sched_job_cleanup(&submit->sched_job);
+err_submit_put:
+ etnaviv_submit_put(submit);
+
+err_submit_ww_acquire:
+ ww_acquire_fini(&ticket);
+
+err_submit_cmds:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
+ kvfree(stream);
+ kvfree(bos);
+ kvfree(relocs);
+ kvfree(pmrs);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
new file mode 100644
index 000000000..f667e7906
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,1926 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/thermal.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_dump.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
+#include "common.xml.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+#include "cmdstream.xml.h"
+
+static const struct platform_device_id gpu_ids[] = {
+ { .name = "etnaviv-gpu,2d" },
+ { },
+};
+
+/*
+ * Driver functions:
+ */
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
+{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
+ switch (param) {
+ case ETNAVIV_PARAM_GPU_MODEL:
+ *value = gpu->identity.model;
+ break;
+
+ case ETNAVIV_PARAM_GPU_REVISION:
+ *value = gpu->identity.revision;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_0:
+ *value = gpu->identity.features;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_1:
+ *value = gpu->identity.minor_features0;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_2:
+ *value = gpu->identity.minor_features1;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_3:
+ *value = gpu->identity.minor_features2;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_4:
+ *value = gpu->identity.minor_features3;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_5:
+ *value = gpu->identity.minor_features4;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_6:
+ *value = gpu->identity.minor_features5;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_7:
+ *value = gpu->identity.minor_features6;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_8:
+ *value = gpu->identity.minor_features7;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_9:
+ *value = gpu->identity.minor_features8;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_10:
+ *value = gpu->identity.minor_features9;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_11:
+ *value = gpu->identity.minor_features10;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_12:
+ *value = gpu->identity.minor_features11;
+ break;
+
+ case ETNAVIV_PARAM_GPU_STREAM_COUNT:
+ *value = gpu->identity.stream_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_REGISTER_MAX:
+ *value = gpu->identity.register_max;
+ break;
+
+ case ETNAVIV_PARAM_GPU_THREAD_COUNT:
+ *value = gpu->identity.thread_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
+ *value = gpu->identity.vertex_cache_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
+ *value = gpu->identity.shader_core_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
+ *value = gpu->identity.pixel_pipes;
+ break;
+
+ case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
+ *value = gpu->identity.vertex_output_buffer_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
+ *value = gpu->identity.buffer_size;
+ break;
+
+ case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
+ *value = gpu->identity.instruction_count;
+ break;
+
+ case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
+ *value = gpu->identity.num_constants;
+ break;
+
+ case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
+ *value = gpu->identity.varyings_count;
+ break;
+
+ case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
+ if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
+ *value = ETNAVIV_SOFTPIN_START_ADDRESS;
+ else
+ *value = ~0ULL;
+ break;
+
+ case ETNAVIV_PARAM_GPU_PRODUCT_ID:
+ *value = gpu->identity.product_id;
+ break;
+
+ case ETNAVIV_PARAM_GPU_CUSTOMER_ID:
+ *value = gpu->identity.customer_id;
+ break;
+
+ case ETNAVIV_PARAM_GPU_ECO_ID:
+ *value = gpu->identity.eco_id;
+ break;
+
+ default:
+ DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+#define etnaviv_is_model_rev(gpu, mod, rev) \
+ ((gpu)->identity.model == chipModel_##mod && \
+ (gpu)->identity.revision == rev)
+#define etnaviv_field(val, field) \
+ (((val) & field##__MASK) >> field##__SHIFT)
+
+static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
+{
+ if (gpu->identity.minor_features0 &
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
+ u32 specs[4];
+ unsigned int streams;
+
+ specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
+ specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
+ specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
+ specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
+
+ gpu->identity.stream_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_STREAM_COUNT);
+ gpu->identity.register_max = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_REGISTER_MAX);
+ gpu->identity.thread_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_THREAD_COUNT);
+ gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
+ gpu->identity.shader_core_count = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
+ gpu->identity.pixel_pipes = etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
+ gpu->identity.vertex_output_buffer_size =
+ etnaviv_field(specs[0],
+ VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
+
+ gpu->identity.buffer_size = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
+ gpu->identity.instruction_count = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
+ gpu->identity.num_constants = etnaviv_field(specs[1],
+ VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
+
+ gpu->identity.varyings_count = etnaviv_field(specs[2],
+ VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
+
+		/* This overrides the value from the older register if non-zero */
+ streams = etnaviv_field(specs[3],
+ VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
+ if (streams)
+ gpu->identity.stream_count = streams;
+ }
+
+ /* Fill in the stream count if not specified */
+ if (gpu->identity.stream_count == 0) {
+ if (gpu->identity.model >= 0x1000)
+ gpu->identity.stream_count = 4;
+ else
+ gpu->identity.stream_count = 1;
+ }
+
+ /* Convert the register max value */
+ if (gpu->identity.register_max)
+ gpu->identity.register_max = 1 << gpu->identity.register_max;
+ else if (gpu->identity.model == chipModel_GC400)
+ gpu->identity.register_max = 32;
+ else
+ gpu->identity.register_max = 64;
+
+ /* Convert thread count */
+ if (gpu->identity.thread_count)
+ gpu->identity.thread_count = 1 << gpu->identity.thread_count;
+ else if (gpu->identity.model == chipModel_GC400)
+ gpu->identity.thread_count = 64;
+ else if (gpu->identity.model == chipModel_GC500 ||
+ gpu->identity.model == chipModel_GC530)
+ gpu->identity.thread_count = 128;
+ else
+ gpu->identity.thread_count = 256;
+
+ if (gpu->identity.vertex_cache_size == 0)
+ gpu->identity.vertex_cache_size = 8;
+
+ if (gpu->identity.shader_core_count == 0) {
+ if (gpu->identity.model >= 0x1000)
+ gpu->identity.shader_core_count = 2;
+ else
+ gpu->identity.shader_core_count = 1;
+ }
+
+ if (gpu->identity.pixel_pipes == 0)
+ gpu->identity.pixel_pipes = 1;
+
+	/* Convert vertex output buffer size */
+ if (gpu->identity.vertex_output_buffer_size) {
+ gpu->identity.vertex_output_buffer_size =
+ 1 << gpu->identity.vertex_output_buffer_size;
+ } else if (gpu->identity.model == chipModel_GC400) {
+ if (gpu->identity.revision < 0x4000)
+ gpu->identity.vertex_output_buffer_size = 512;
+ else if (gpu->identity.revision < 0x4200)
+ gpu->identity.vertex_output_buffer_size = 256;
+ else
+ gpu->identity.vertex_output_buffer_size = 128;
+ } else {
+ gpu->identity.vertex_output_buffer_size = 512;
+ }
+
+ switch (gpu->identity.instruction_count) {
+ case 0:
+ if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+ gpu->identity.model == chipModel_GC880)
+ gpu->identity.instruction_count = 512;
+ else
+ gpu->identity.instruction_count = 256;
+ break;
+
+ case 1:
+ gpu->identity.instruction_count = 1024;
+ break;
+
+ case 2:
+ gpu->identity.instruction_count = 2048;
+ break;
+
+ default:
+ gpu->identity.instruction_count = 256;
+ break;
+ }
+
+ if (gpu->identity.num_constants == 0)
+ gpu->identity.num_constants = 168;
+
+ if (gpu->identity.varyings_count == 0) {
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
+ gpu->identity.varyings_count = 12;
+ else
+ gpu->identity.varyings_count = 8;
+ }
+
+ /*
+ * For some cores, two varyings are consumed for position, so the
+ * maximum varying count needs to be reduced by one.
+ */
+ if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+ etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
+ etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
+ etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
+ etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+ etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
+ etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
+ etnaviv_is_model_rev(gpu, GC880, 0x5106))
+ gpu->identity.varyings_count -= 1;
+}
+
+static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+{
+ u32 chipIdentity;
+
+ chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
+
+	/* Special case for older graphics cores. */
+ if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
+ gpu->identity.model = chipModel_GC500;
+ gpu->identity.revision = etnaviv_field(chipIdentity,
+ VIVS_HI_CHIP_IDENTITY_REVISION);
+ } else {
+ u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
+
+ gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
+ gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+ gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
+
+ /*
+		 * Reading these two registers on GC600 rev 0x19 results in an
+		 * unhandled fault: external abort on non-linefetch.
+ */
+ if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+ }
+
+ /*
+ * !!!! HACK ALERT !!!!
+ * Because people change device IDs without letting software
+ * know about it - here is the hack to make it all look the
+ * same. Only for GC400 family.
+ */
+ if ((gpu->identity.model & 0xff00) == 0x0400 &&
+ gpu->identity.model != chipModel_GC420) {
+ gpu->identity.model = gpu->identity.model & 0x0400;
+ }
+
+ /* Another special case */
+ if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
+ u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
+
+ if (chipDate == 0x20080814 && chipTime == 0x12051100) {
+ /*
+ * This IP has an ECO; put the correct
+ * revision in it.
+ */
+ gpu->identity.revision = 0x1051;
+ }
+ }
+
+ /*
+ * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
+ * reality it's just a re-branded GC3000. We can identify this
+ * core by the upper half of the revision register being all 1.
+ * Fix model/rev here, so all other places can refer to this
+ * core by its real identity.
+ */
+ if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
+ gpu->identity.model = chipModel_GC3000;
+ gpu->identity.revision &= 0xffff;
+ }
+
+ if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+ gpu->identity.eco_id = 1;
+
+ if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+ gpu->identity.eco_id = 1;
+ }
+
+ dev_info(gpu->dev, "model: GC%x, revision: %x\n",
+ gpu->identity.model, gpu->identity.revision);
+
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
+ /*
+ * If there is a match in the HWDB, we aren't interested in the
+ * remaining register values, as they might be wrong.
+ */
+ if (etnaviv_fill_identity_from_hwdb(gpu))
+ return;
+
+ gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
+
+ /* Disable fast clear on GC700. */
+ if (gpu->identity.model == chipModel_GC700)
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+
+ /* These models/revisions don't have the 2D pipe bit */
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision <= 2) ||
+ gpu->identity.model == chipModel_GC300)
+ gpu->identity.features |= chipFeatures_PIPE_2D;
+
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision < 2) ||
+ (gpu->identity.model == chipModel_GC300 &&
+ gpu->identity.revision < 0x2000)) {
+
+ /*
+		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
+ * registers.
+ */
+ gpu->identity.minor_features0 = 0;
+ gpu->identity.minor_features1 = 0;
+ gpu->identity.minor_features2 = 0;
+ gpu->identity.minor_features3 = 0;
+ gpu->identity.minor_features4 = 0;
+ gpu->identity.minor_features5 = 0;
+ } else
+ gpu->identity.minor_features0 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
+
+ if (gpu->identity.minor_features0 &
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
+ gpu->identity.minor_features1 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
+ gpu->identity.minor_features2 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
+ gpu->identity.minor_features3 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+ gpu->identity.minor_features4 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
+ gpu->identity.minor_features5 =
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
+ }
+
+ /* GC600/300 idle register reports zero bits where modules aren't present */
+ if (gpu->identity.model == chipModel_GC600 ||
+ gpu->identity.model == chipModel_GC300)
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+ VIVS_HI_IDLE_STATE_RA |
+ VIVS_HI_IDLE_STATE_SE |
+ VIVS_HI_IDLE_STATE_PA |
+ VIVS_HI_IDLE_STATE_SH |
+ VIVS_HI_IDLE_STATE_PE |
+ VIVS_HI_IDLE_STATE_DE |
+ VIVS_HI_IDLE_STATE_FE;
+
+ etnaviv_hw_specs(gpu);
+}
+
+static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
+{
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+}
+
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
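+	/*
+	 * Cores with dynamic frequency scaling are throttled through the
+	 * clock framework; everything else falls back to the FSCALE divider
+	 * in the clock control register.
+	 */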
+ if (gpu->identity.minor_features2 &
+ chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
+ clk_set_rate(gpu->clk_core,
+ gpu->base_rate_core >> gpu->freq_scale);
+ clk_set_rate(gpu->clk_shader,
+ gpu->base_rate_shader >> gpu->freq_scale);
+ } else {
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+ clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
+ clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ etnaviv_gpu_load_clock(gpu, clock);
+ }
+}
+
+static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+{
+ u32 control, idle;
+ unsigned long timeout;
+ bool failed = true;
+
+ /* We hope that the GPU resets in under one second */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_is_after_jiffies(timeout)) {
+ /* enable clock */
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ etnaviv_gpu_load_clock(gpu, control);
+
+ /* isolate the GPU. */
+ control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
+ VIVS_MMUv2_AHB_CONTROL_RESET);
+ } else {
+ /* set soft reset. */
+ control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+ }
+
+ /* wait for reset. */
+ usleep_range(10, 20);
+
+ /* reset soft reset bit. */
+ control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* reset GPU isolation. */
+ control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ /* read idle register. */
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ /* try resetting again if FE is not idle */
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
+ dev_dbg(gpu->dev, "FE is not idle\n");
+ continue;
+ }
+
+ /* read reset register. */
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+ /* is the GPU idle? */
+ if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
+ ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
+ dev_dbg(gpu->dev, "GPU is not idle\n");
+ continue;
+ }
+
+ /* disable debug registers, as they are not normally needed */
+ control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
+ failed = false;
+ break;
+ }
+
+ if (failed) {
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+
+ dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
+ idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
+
+ return -EBUSY;
+ }
+
+ /* We rely on the GPU running, so program the clock */
+ etnaviv_gpu_update_clock(gpu);
+
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
+ return 0;
+}
+
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+ u32 pmc, ppc;
+
+ /* enable clock gating */
+ ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+ /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+ if (gpu->identity.revision == 0x4301 ||
+ gpu->identity.revision == 0x4302)
+ ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+ pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+ /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
+ if (gpu->identity.model >= chipModel_GC400 &&
+ gpu->identity.model != chipModel_GC420 &&
+ !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+ /*
+ * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+ * present without a bug fix.
+ */
+ if (gpu->identity.revision < 0x5000 &&
+ gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+ !(gpu->identity.minor_features1 &
+ chipMinorFeatures1_DISABLE_PE_GATING))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+ if (gpu->identity.revision < 0x5422)
+ pmc |= BIT(15); /* Unknown bit */
+
+ /* Disable TX clock gating on affected core revisions. */
+ if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+ etnaviv_is_model_rev(gpu, GC2000, 0x5108))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
+ /* Disable SE, RA and TX clock gating on affected core revisions. */
+ if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
+ VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA |
+ VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+ gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+{
+ gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
+ gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+ VIVS_FE_COMMAND_CONTROL_ENABLE |
+ VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+ }
+
+ gpu->fe_running = true;
+}
+
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ u16 prefetch;
+ u32 address;
+
+ /* setup the MMU */
+ etnaviv_iommu_restore(gpu, context);
+
+ /* Start command processor */
+ prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
+
+ etnaviv_gpu_start_fe(gpu, address, prefetch);
+}
+
+static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
+{
+ /*
+ * Base value for VIVS_PM_PULSE_EATER register on models where it
+ * cannot be read, extracted from vivante kernel driver.
+ */
+ u32 pulse_eater = 0x01590880;
+
+ if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+ etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
+		pulse_eater |= BIT(23);
+	}
+
+ if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
+ etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
+ pulse_eater &= ~BIT(16);
+ pulse_eater |= BIT(17);
+ }
+
+	if ((gpu->identity.revision > 0x5420) &&
+	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
+ /* Performance fix: disable internal DFS */
+ pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+ pulse_eater |= BIT(18);
+ }
+
+ gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+}
+
+static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
+{
+ if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
+ etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
+ gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
+ u32 mc_memory_debug;
+
+ mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
+
+ if (gpu->identity.revision == 0x5007)
+ mc_memory_debug |= 0x0c;
+ else
+ mc_memory_debug |= 0x08;
+
+ gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
+ }
+
+ /* enable module-level clock gating */
+ etnaviv_gpu_enable_mlcg(gpu);
+
+ /*
+	 * Update GPU AXI cache attribute to "cacheable, no allocate".
+ * This is necessary to prevent the iMX6 SoC locking up.
+ */
+ gpu_write(gpu, VIVS_HI_AXI_CONFIG,
+ VIVS_HI_AXI_CONFIG_AWCACHE(2) |
+ VIVS_HI_AXI_CONFIG_ARCACHE(2));
+
+ /* GC2000 rev 5108 needs a special bus config */
+ if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
+ u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
+ bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
+ bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
+ gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
+ }
+
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
+ val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
+ }
+
+ /* setup the pulse eater */
+ etnaviv_gpu_setup_pulse_eater(gpu);
+
+ gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+}
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ dma_addr_t cmdbuf_paddr;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0) {
+ dev_err(gpu->dev, "Failed to enable GPU power domain\n");
+ goto pm_put;
+ }
+
+ etnaviv_hw_identify(gpu);
+
+ if (gpu->identity.model == 0) {
+ dev_err(gpu->dev, "Unknown GPU model\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* Exclude VG cores with FE2.0 */
+ if (gpu->identity.features & chipFeatures_PIPE_VG &&
+ gpu->identity.features & chipFeatures_FE20) {
+ dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /*
+	 * On cores that support security features, we claim control over the
+	 * security state.
+ */
+ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
+ (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
+ gpu->sec_mode = ETNA_SEC_KERNEL;
+
+ ret = etnaviv_hw_reset(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
+ goto fail;
+ }
+
+ ret = etnaviv_iommu_global_init(gpu);
+ if (ret)
+ goto fail;
+
+ /*
+ * If the GPU is part of a system with DMA addressing limitations,
+ * request pages for our SHM backend buffers from the DMA32 zone to
+ * hopefully avoid performance killing SWIOTLB bounce buffering.
+ */
+ if (dma_addressing_limited(gpu->dev))
+ priv->shm_gfp_mask |= GFP_DMA32;
+
+ /* Create buffer: */
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
+ PAGE_SIZE);
+ if (ret) {
+ dev_err(gpu->dev, "could not create command buffer\n");
+ goto fail;
+ }
+
+ /*
+ * Set the GPU linear window to cover the cmdbuf region, as the GPU
+ * won't be able to start execution otherwise. The alignment to 128M is
+ * chosen arbitrarily but helps in debugging, as the MMU offset
+	 * calculations are much more straightforward this way.
+ *
+ * On MC1.0 cores the linear window offset is ignored by the TS engine,
+ * leading to inconsistent memory views. Avoid using the offset on those
+ * cores if possible, otherwise disable the TS feature.
+ */
+ cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);
+
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ if (cmdbuf_paddr >= SZ_2G)
+ priv->mmu_global->memory_base = SZ_2G;
+ else
+ priv->mmu_global->memory_base = cmdbuf_paddr;
+ } else if (cmdbuf_paddr + SZ_128M >= SZ_2G) {
+ dev_info(gpu->dev,
+ "Need to move linear window on MC1.0, disabling TS\n");
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+ priv->mmu_global->memory_base = SZ_2G;
+ }
+
+ /* Setup event management */
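+	/*
+	 * event_free acts as a counting semaphore: one count is added below
+	 * for each free event slot.
+	 */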
+ spin_lock_init(&gpu->event_spinlock);
+ init_completion(&gpu->event_free);
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
+ complete(&gpu->event_free);
+
+ /* Now program the hardware */
+ mutex_lock(&gpu->lock);
+ etnaviv_gpu_hw_init(gpu);
+ mutex_unlock(&gpu->lock);
+
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ gpu->initialized = true;
+
+ return 0;
+
+fail:
+ pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct dma_debug {
+ u32 address[2];
+ u32 state[2];
+};
+
+static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
+{
+ u32 i;
+
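+	/*
+	 * Sample the FE DMA address and state twice, re-reading up to 500
+	 * times; if neither value ever changes the front end is most likely
+	 * stuck.
+	 */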
+ debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+ for (i = 0; i < 500; i++) {
+ debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
+
+ if (debug->address[0] != debug->address[1])
+ break;
+
+ if (debug->state[0] != debug->state[1])
+ break;
+ }
+}
+
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
+{
+ struct dma_debug debug;
+ u32 dma_lo, dma_hi, axi, idle;
+ int ret;
+
+ seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
+
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+ goto pm_put;
+
+ dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
+ dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
+ axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ verify_dma(gpu, &debug);
+
+ seq_puts(m, "\tidentity\n");
+ seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
+ seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
+ seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
+ seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
+ seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
+
+ seq_puts(m, "\tfeatures\n");
+ seq_printf(m, "\t major_features: 0x%08x\n",
+ gpu->identity.features);
+ seq_printf(m, "\t minor_features0: 0x%08x\n",
+ gpu->identity.minor_features0);
+ seq_printf(m, "\t minor_features1: 0x%08x\n",
+ gpu->identity.minor_features1);
+ seq_printf(m, "\t minor_features2: 0x%08x\n",
+ gpu->identity.minor_features2);
+ seq_printf(m, "\t minor_features3: 0x%08x\n",
+ gpu->identity.minor_features3);
+ seq_printf(m, "\t minor_features4: 0x%08x\n",
+ gpu->identity.minor_features4);
+ seq_printf(m, "\t minor_features5: 0x%08x\n",
+ gpu->identity.minor_features5);
+ seq_printf(m, "\t minor_features6: 0x%08x\n",
+ gpu->identity.minor_features6);
+ seq_printf(m, "\t minor_features7: 0x%08x\n",
+ gpu->identity.minor_features7);
+ seq_printf(m, "\t minor_features8: 0x%08x\n",
+ gpu->identity.minor_features8);
+ seq_printf(m, "\t minor_features9: 0x%08x\n",
+ gpu->identity.minor_features9);
+ seq_printf(m, "\t minor_features10: 0x%08x\n",
+ gpu->identity.minor_features10);
+ seq_printf(m, "\t minor_features11: 0x%08x\n",
+ gpu->identity.minor_features11);
+
+ seq_puts(m, "\tspecs\n");
+ seq_printf(m, "\t stream_count: %d\n",
+ gpu->identity.stream_count);
+ seq_printf(m, "\t register_max: %d\n",
+ gpu->identity.register_max);
+ seq_printf(m, "\t thread_count: %d\n",
+ gpu->identity.thread_count);
+ seq_printf(m, "\t vertex_cache_size: %d\n",
+ gpu->identity.vertex_cache_size);
+ seq_printf(m, "\t shader_core_count: %d\n",
+ gpu->identity.shader_core_count);
+ seq_printf(m, "\t pixel_pipes: %d\n",
+ gpu->identity.pixel_pipes);
+ seq_printf(m, "\t vertex_output_buffer_size: %d\n",
+ gpu->identity.vertex_output_buffer_size);
+ seq_printf(m, "\t buffer_size: %d\n",
+ gpu->identity.buffer_size);
+ seq_printf(m, "\t instruction_count: %d\n",
+ gpu->identity.instruction_count);
+ seq_printf(m, "\t num_constants: %d\n",
+ gpu->identity.num_constants);
+ seq_printf(m, "\t varyings_count: %d\n",
+ gpu->identity.varyings_count);
+
+ seq_printf(m, "\taxi: 0x%08x\n", axi);
+ seq_printf(m, "\tidle: 0x%08x\n", idle);
+ idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
+ seq_puts(m, "\t FE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
+ seq_puts(m, "\t DE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
+ seq_puts(m, "\t PE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
+ seq_puts(m, "\t SH is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
+ seq_puts(m, "\t PA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
+ seq_puts(m, "\t SE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
+ seq_puts(m, "\t RA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
+ seq_puts(m, "\t TX is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
+ seq_puts(m, "\t VG is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
+ seq_puts(m, "\t IM is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
+ seq_puts(m, "\t FP is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
+ seq_puts(m, "\t TS is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
+ seq_puts(m, "\t BL is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
+ seq_puts(m, "\t ASYNCFE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
+ seq_puts(m, "\t MC is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
+ seq_puts(m, "\t PPA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
+ seq_puts(m, "\t WD is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
+ seq_puts(m, "\t NN is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
+ seq_puts(m, "\t TP is not idle\n");
+ if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
+ seq_puts(m, "\t AXI low power mode\n");
+
+ if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
+ u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
+ u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
+ u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
+
+ seq_puts(m, "\tMC\n");
+ seq_printf(m, "\t read0: 0x%08x\n", read0);
+ seq_printf(m, "\t read1: 0x%08x\n", read1);
+ seq_printf(m, "\t write: 0x%08x\n", write);
+ }
+
+ seq_puts(m, "\tDMA ");
+
+ if (debug.address[0] == debug.address[1] &&
+ debug.state[0] == debug.state[1]) {
+ seq_puts(m, "seems to be stuck\n");
+ } else if (debug.address[0] == debug.address[1]) {
+ seq_puts(m, "address is constant\n");
+ } else {
+ seq_puts(m, "is running\n");
+ }
+
+ seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
+ seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
+ seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
+ seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
+ seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
+ dma_lo, dma_hi);
+
+ ret = 0;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+}
+#endif
+
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+{
+ unsigned int i;
+
+ dev_err(gpu->dev, "recover hung GPU!\n");
+
+ if (pm_runtime_get_sync(gpu->dev) < 0)
+ goto pm_put;
+
+ mutex_lock(&gpu->lock);
+
+ etnaviv_hw_reset(gpu);
+
+ /* complete all events, the GPU won't do it after the reset */
+ spin_lock(&gpu->event_spinlock);
+ for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+ complete(&gpu->event_free);
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
+ spin_unlock(&gpu->event_spinlock);
+
+ etnaviv_gpu_hw_init(gpu);
+
+ mutex_unlock(&gpu->lock);
+ pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+}
+
+/* fence object management */
+struct etnaviv_fence {
+ struct etnaviv_gpu *gpu;
+ struct dma_fence base;
+};
+
+static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct etnaviv_fence, base);
+}
+
+static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "etnaviv";
+}
+
+static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ return dev_name(f->gpu->dev);
+}
+
+static bool etnaviv_fence_signaled(struct dma_fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
+}
+
+static void etnaviv_fence_release(struct dma_fence *fence)
+{
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
+
+ kfree_rcu(f, base.rcu);
+}
+
+static const struct dma_fence_ops etnaviv_fence_ops = {
+ .get_driver_name = etnaviv_fence_get_driver_name,
+ .get_timeline_name = etnaviv_fence_get_timeline_name,
+ .signaled = etnaviv_fence_signaled,
+ .release = etnaviv_fence_release,
+};
+
+static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_fence *f;
+
+ /*
+ * GPU lock must already be held, otherwise fence completion order might
+ * not match the seqno order assigned here.
+ */
+ lockdep_assert_held(&gpu->lock);
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->gpu = gpu;
+
+ dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
+ gpu->fence_context, ++gpu->next_fence);
+
+ return &f->base;
+}
+
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
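+	/* The signed difference keeps the comparison correct across 32-bit seqno wraparound. */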
+ return (s32)(a - b) > 0;
+}
+
+/*
+ * event management:
+ */
+
+static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
+ unsigned int *events)
+{
+ unsigned long timeout = msecs_to_jiffies(10 * 10000);
+ unsigned i, acquired = 0;
+
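+	/*
+	 * Grab one free-event completion per requested event, carrying the
+	 * remaining timeout over so the total wait stays bounded.
+	 */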
+ for (i = 0; i < nr_events; i++) {
+ unsigned long ret;
+
+ ret = wait_for_completion_timeout(&gpu->event_free, timeout);
+
+ if (!ret) {
+ dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ goto out;
+ }
+
+ acquired++;
+ timeout = ret;
+ }
+
+ spin_lock(&gpu->event_spinlock);
+
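+	/*
+	 * We hold nr_events completions at this point, so at least that many
+	 * bits are clear and find_first_zero_bit cannot fail.
+	 */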
+ for (i = 0; i < nr_events; i++) {
+ int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
+
+ events[i] = event;
+ memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
+ set_bit(event, gpu->event_bitmap);
+ }
+
+ spin_unlock(&gpu->event_spinlock);
+
+ return 0;
+
+out:
+ for (i = 0; i < acquired; i++)
+ complete(&gpu->event_free);
+
+ return -EBUSY;
+}
+
+static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
+{
+ if (!test_bit(event, gpu->event_bitmap)) {
+ dev_warn(gpu->dev, "event %u is already marked as free",
+ event);
+ } else {
+ clear_bit(event, gpu->event_bitmap);
+ complete(&gpu->event_free);
+ }
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+ u32 id, struct drm_etnaviv_timespec *timeout)
+{
+ struct dma_fence *fence;
+ int ret;
+
+ /*
+ * Look up the fence and take a reference. We might still find a fence
+ * whose refcount has already dropped to zero. dma_fence_get_rcu
+ * pretends we didn't find a fence in that case.
+ */
+ rcu_read_lock();
+ fence = idr_find(&gpu->fence_idr, id);
+ if (fence)
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+
+ if (!fence)
+ return 0;
+
+ if (!timeout) {
+ /* No timeout was requested: just test for completion */
+ ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
+
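+		/*
+		 * dma_fence_wait_timeout() returns the remaining jiffies on
+		 * success and 0 on timeout; map those onto 0 and -ETIMEDOUT.
+		 */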
+ ret = dma_fence_wait_timeout(fence, true, remaining);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret != -ERESTARTSYS)
+ ret = 0;
+
+ }
+
+ dma_fence_put(fence);
+ return ret;
+}
+
+/*
+ * Wait for an object to become inactive. This, on its own, is not race
+ * free: the object is moved by the scheduler off the active list, and
+ * then the iova is put. Moreover, the object could be re-submitted just
+ * after we notice that it's become inactive.
+ *
+ * Although the retirement happens under the gpu lock, we don't want to hold
+ * that lock in this function while waiting.
+ */
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout)
+{
+ unsigned long remaining;
+ long ret;
+
+ if (!timeout)
+ return !is_active(etnaviv_obj) ? 0 : -EBUSY;
+
+ remaining = etnaviv_timeout_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(gpu->fence_event,
+ !is_active(etnaviv_obj),
+ remaining);
+ if (ret > 0)
+ return 0;
+ else if (ret == -ERESTARTSYS)
+ return -ERESTARTSYS;
+ else
+ return -ETIMEDOUT;
+}
+
+static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event, unsigned int flags)
+{
+ const struct etnaviv_gem_submit *submit = event->submit;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
+
+ if (pmr->flags == flags)
+ etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
+ }
+}
+
+static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ u32 val;
+
+ /* disable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+ /* enable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+}
+
+static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ const struct etnaviv_gem_submit *submit = event->submit;
+ unsigned int i;
+ u32 val;
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
+
+ *pmr->bo_vma = pmr->sequence;
+ }
+
+ /* disable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ /* enable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+}
+
+
+/* Add BOs to the GPU's ring and kick the GPU: */
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+{
+ struct etnaviv_gpu *gpu = submit->gpu;
+ struct dma_fence *gpu_fence;
+ unsigned int i, nr_events = 1, event[3];
+ int ret;
+
+ if (!submit->runtime_resumed) {
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(gpu->dev);
+ return NULL;
+ }
+ submit->runtime_resumed = true;
+ }
+
+ /*
+ * if there are performance monitor requests we need to have
+ * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
+ * requests.
+ * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
+ * and update the sequence number for userspace.
+ */
+ if (submit->nr_pmrs)
+ nr_events = 3;
+
+ ret = event_alloc(gpu, nr_events, event);
+ if (ret) {
+ DRM_ERROR("no free events\n");
+ pm_runtime_put_noidle(gpu->dev);
+ return NULL;
+ }
+
+ mutex_lock(&gpu->lock);
+
+ gpu_fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!gpu_fence) {
+ for (i = 0; i < nr_events; i++)
+ event_free(gpu, event[i]);
+
+ goto out_unlock;
+ }
+
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
+
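+	/*
+	 * Queue order: optional perfmon pre-sample sync point (event[1]),
+	 * the command buffer itself (event[0]), then an optional post-sample
+	 * sync point (event[2]) which also publishes the sequence numbers.
+	 */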
+ if (submit->nr_pmrs) {
+ gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+ kref_get(&submit->refcount);
+ gpu->event[event[1]].submit = submit;
+ etnaviv_sync_point_queue(gpu, event[1]);
+ }
+
+ gpu->event[event[0]].fence = gpu_fence;
+ submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
+ etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
+ event[0], &submit->cmdbuf);
+
+ if (submit->nr_pmrs) {
+ gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
+ kref_get(&submit->refcount);
+ gpu->event[event[2]].submit = submit;
+ etnaviv_sync_point_queue(gpu, event[2]);
+ }
+
+out_unlock:
+ mutex_unlock(&gpu->lock);
+
+ return gpu_fence;
+}
+
+static void sync_point_worker(struct work_struct *work)
+{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ sync_point_work);
+ struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
+ u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+
+ event->sync_point(gpu, event);
+ etnaviv_submit_put(event->submit);
+ event_free(gpu, gpu->sync_point_event);
+
+ /* restart FE last to avoid GPU and IRQ racing against this worker */
+ etnaviv_gpu_start_fe(gpu, addr + 2, 2);
+}
+
+static void dump_mmu_fault(struct etnaviv_gpu *gpu)
+{
+ u32 status_reg, status;
+ int i;
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ status_reg = VIVS_MMUv2_STATUS;
+ else
+ status_reg = VIVS_MMUv2_SEC_STATUS;
+
+ status = gpu_read(gpu, status_reg);
+ dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
+
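+	/* Four exception slots, 4 status bits each; report each faulting address. */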
+ for (i = 0; i < 4; i++) {
+ u32 address_reg;
+
+ if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+ continue;
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
+ else
+ address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
+
+ dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
+ gpu_read(gpu, address_reg));
+ }
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct etnaviv_gpu *gpu = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
+
+ if (intr != 0) {
+ int event;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+
+ dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
+
+ if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
+ dev_err(gpu->dev, "AXI bus error\n");
+ intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
+ }
+
+ if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
+ dump_mmu_fault(gpu);
+ intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
+ }
+
+ while ((event = ffs(intr)) != 0) {
+ struct dma_fence *fence;
+
+ event -= 1;
+
+ intr &= ~(1 << event);
+
+ dev_dbg(gpu->dev, "event %u\n", event);
+
+ if (gpu->event[event].sync_point) {
+ gpu->sync_point_event = event;
+ queue_work(gpu->wq, &gpu->sync_point_work);
+ }
+
+ fence = gpu->event[event].fence;
+ if (!fence)
+ continue;
+
+ gpu->event[event].fence = NULL;
+
+ /*
+ * Events can be processed out of order. Eg,
+ * - allocate and queue event 0
+ * - allocate event 1
+ * - event 0 completes, we process it
+ * - allocate and queue event 0
+ * - event 1 and event 0 complete
+ * we can end up processing event 0 first, then 1.
+ */
+ if (fence_after(fence->seqno, gpu->completed_fence))
+ gpu->completed_fence = fence->seqno;
+ dma_fence_signal(fence);
+
+ event_free(gpu, event);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = clk_prepare_enable(gpu->clk_reg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(gpu->clk_bus);
+ if (ret)
+ goto disable_clk_reg;
+
+ ret = clk_prepare_enable(gpu->clk_core);
+ if (ret)
+ goto disable_clk_bus;
+
+ ret = clk_prepare_enable(gpu->clk_shader);
+ if (ret)
+ goto disable_clk_core;
+
+ return 0;
+
+disable_clk_core:
+ clk_disable_unprepare(gpu->clk_core);
+disable_clk_bus:
+ clk_disable_unprepare(gpu->clk_bus);
+disable_clk_reg:
+ clk_disable_unprepare(gpu->clk_reg);
+
+ return ret;
+}
+
+static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
+{
+ clk_disable_unprepare(gpu->clk_shader);
+ clk_disable_unprepare(gpu->clk_core);
+ clk_disable_unprepare(gpu->clk_bus);
+ clk_disable_unprepare(gpu->clk_reg);
+
+ return 0;
+}
+
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ do {
+ u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ if ((idle & gpu->idle_mask) == gpu->idle_mask)
+ return 0;
+
+ if (time_is_before_jiffies(timeout)) {
+ dev_warn(gpu->dev,
+ "timed out waiting for idle: idle=0x%x\n",
+ idle);
+ return -ETIMEDOUT;
+ }
+
+ udelay(5);
+ } while (1);
+}
+
+static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+{
+ if (gpu->initialized && gpu->fe_running) {
+ /* Replace the last WAIT with END */
+ mutex_lock(&gpu->lock);
+ etnaviv_buffer_end(gpu);
+ mutex_unlock(&gpu->lock);
+
+ /*
+ * We know that only the FE is busy here, this should
+ * happen quickly (as the WAIT is only 200 cycles). If
+ * we fail, just warn and continue.
+ */
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu->fe_running = false;
+ }
+
+ gpu->exec_state = -1;
+
+ return etnaviv_gpu_clk_disable(gpu);
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = mutex_lock_killable(&gpu->lock);
+ if (ret)
+ return ret;
+
+ etnaviv_gpu_update_clock(gpu);
+ etnaviv_gpu_hw_init(gpu);
+
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+#endif
+
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 6;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ *state = gpu->freq_scale;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
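+	/* Store the new throttle state; apply it now unless runtime suspended. */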
+ mutex_lock(&gpu->lock);
+ gpu->freq_scale = state;
+ if (!pm_runtime_suspended(gpu->dev))
+ etnaviv_gpu_update_clock(gpu);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops cooling_ops = {
+ .get_max_state = etnaviv_gpu_cooling_get_max_state,
+ .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+ .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
+static int etnaviv_gpu_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct etnaviv_drm_private *priv = drm->dev_private;
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ (char *)dev_name(dev), gpu, &cooling_ops);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+ }
+
+ gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+ if (!gpu->wq) {
+ ret = -ENOMEM;
+ goto out_thermal;
+ }
+
+ ret = etnaviv_sched_init(gpu);
+ if (ret)
+ goto out_workqueue;
+
+#ifdef CONFIG_PM
+ ret = pm_runtime_get_sync(gpu->dev);
+#else
+ ret = etnaviv_gpu_clk_enable(gpu);
+#endif
+ if (ret < 0)
+ goto out_sched;
+
+
+ gpu->drm = drm;
+ gpu->fence_context = dma_fence_context_alloc(1);
+ idr_init(&gpu->fence_idr);
+ spin_lock_init(&gpu->fence_spinlock);
+
+ INIT_WORK(&gpu->sync_point_work, sync_point_worker);
+ init_waitqueue_head(&gpu->fence_event);
+
+ priv->gpu[priv->num_gpus++] = gpu;
+
+ pm_runtime_mark_last_busy(gpu->dev);
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return 0;
+
+out_sched:
+ etnaviv_sched_fini(gpu);
+
+out_workqueue:
+ destroy_workqueue(gpu->wq);
+
+out_thermal:
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
+
+ return ret;
+}
+
+static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+
+ DBG("%s", dev_name(gpu->dev));
+
+ destroy_workqueue(gpu->wq);
+
+ etnaviv_sched_fini(gpu);
+
+#ifdef CONFIG_PM
+ pm_runtime_get_sync(gpu->dev);
+ pm_runtime_put_sync_suspend(gpu->dev);
+#else
+ etnaviv_gpu_hw_suspend(gpu);
+#endif
+
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
+ if (gpu->initialized) {
+ etnaviv_cmdbuf_free(&gpu->buffer);
+ etnaviv_iommu_global_fini(gpu);
+ gpu->initialized = false;
+ }
+
+ gpu->drm = NULL;
+ idr_destroy(&gpu->fence_idr);
+
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
+ gpu->cooling = NULL;
+}
+
+static const struct component_ops gpu_ops = {
+ .bind = etnaviv_gpu_bind,
+ .unbind = etnaviv_gpu_unbind,
+};
+
+static const struct of_device_id etnaviv_gpu_match[] = {
+ {
+ .compatible = "vivante,gc"
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
+
+static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct etnaviv_gpu *gpu;
+ int err;
+
+ gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ gpu->dev = &pdev->dev;
+ mutex_init(&gpu->lock);
+ mutex_init(&gpu->fence_lock);
+
+ /* Map registers: */
+ gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpu->mmio))
+ return PTR_ERR(gpu->mmio);
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq(pdev, 0);
+ if (gpu->irq < 0)
+ return gpu->irq;
+
+ err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
+ dev_name(gpu->dev), gpu);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
+ return err;
+ }
+
+ /* Get Clocks: */
+ gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
+ DBG("clk_reg: %p", gpu->clk_reg);
+ if (IS_ERR(gpu->clk_reg))
+ return PTR_ERR(gpu->clk_reg);
+
+ gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
+ DBG("clk_bus: %p", gpu->clk_bus);
+ if (IS_ERR(gpu->clk_bus))
+ return PTR_ERR(gpu->clk_bus);
+
+ gpu->clk_core = devm_clk_get(&pdev->dev, "core");
+ DBG("clk_core: %p", gpu->clk_core);
+ if (IS_ERR(gpu->clk_core))
+ return PTR_ERR(gpu->clk_core);
+ gpu->base_rate_core = clk_get_rate(gpu->clk_core);
+
+ gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
+ DBG("clk_shader: %p", gpu->clk_shader);
+ if (IS_ERR(gpu->clk_shader))
+ return PTR_ERR(gpu->clk_shader);
+ gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
+
+ /* TODO: figure out max mapped size */
+ dev_set_drvdata(dev, gpu);
+
+ /*
+ * We treat the device as initially suspended. The runtime PM
+	 * autosuspend delay is rather arbitrary: no measurements have
+ * yet been performed to determine an appropriate value.
+ */
+ pm_runtime_use_autosuspend(gpu->dev);
+ pm_runtime_set_autosuspend_delay(gpu->dev, 200);
+ pm_runtime_enable(gpu->dev);
+
+ err = component_add(&pdev->dev, &gpu_ops);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register component: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &gpu_ops);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int etnaviv_gpu_rpm_suspend(struct device *dev)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ u32 idle, mask;
+
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&gpu->sched.hw_rq_count))
+ return -EBUSY;
+
+ /* Check whether the hardware (except FE and MC) is idle */
+ mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
+ VIVS_HI_IDLE_STATE_MC);
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
+ if (idle != mask) {
+ dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
+ idle);
+ return -EBUSY;
+ }
+
+ return etnaviv_gpu_hw_suspend(gpu);
+}
+
+static int etnaviv_gpu_rpm_resume(struct device *dev)
+{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = etnaviv_gpu_clk_enable(gpu);
+ if (ret)
+ return ret;
+
+ /* Re-initialise the basic hardware state */
+ if (gpu->drm && gpu->initialized) {
+ ret = etnaviv_gpu_hw_resume(gpu);
+ if (ret) {
+ etnaviv_gpu_clk_disable(gpu);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
+ NULL)
+};
+
+struct platform_driver etnaviv_gpu_driver = {
+ .driver = {
+ .name = "etnaviv-gpu",
+ .owner = THIS_MODULE,
+ .pm = &etnaviv_gpu_pm_ops,
+ .of_match_table = etnaviv_gpu_match,
+ },
+ .probe = etnaviv_gpu_platform_probe,
+ .remove = etnaviv_gpu_platform_remove,
+ .id_table = gpu_ids,
+};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
new file mode 100644
index 000000000..85eddd492
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_GPU_H__
+#define __ETNAVIV_GPU_H__
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_drv.h"
+
+struct etnaviv_gem_submit;
+struct etnaviv_vram_mapping;
+
+struct etnaviv_chip_identity {
+ u32 model;
+ u32 revision;
+ u32 product_id;
+ u32 customer_id;
+ u32 eco_id;
+
+ /* Supported feature fields. */
+ u32 features;
+
+ /* Supported minor feature fields. */
+ u32 minor_features0;
+ u32 minor_features1;
+ u32 minor_features2;
+ u32 minor_features3;
+ u32 minor_features4;
+ u32 minor_features5;
+ u32 minor_features6;
+ u32 minor_features7;
+ u32 minor_features8;
+ u32 minor_features9;
+ u32 minor_features10;
+ u32 minor_features11;
+
+ /* Number of streams supported. */
+ u32 stream_count;
+
+ /* Total number of temporary registers per thread. */
+ u32 register_max;
+
+ /* Maximum number of threads. */
+ u32 thread_count;
+
+ /* Number of shader cores. */
+ u32 shader_core_count;
+
+ /* Size of the vertex cache. */
+ u32 vertex_cache_size;
+
+ /* Number of entries in the vertex output buffer. */
+ u32 vertex_output_buffer_size;
+
+ /* Number of pixel pipes. */
+ u32 pixel_pipes;
+
+ /* Number of instructions. */
+ u32 instruction_count;
+
+ /* Number of constants. */
+ u32 num_constants;
+
+ /* Buffer size */
+ u32 buffer_size;
+
+ /* Number of varyings */
+ u8 varyings_count;
+};
+
+enum etnaviv_sec_mode {
+ ETNA_SEC_NONE = 0,
+ ETNA_SEC_KERNEL,
+ ETNA_SEC_TZ
+};
+
+struct etnaviv_event {
+ struct dma_fence *fence;
+ struct etnaviv_gem_submit *submit;
+
+ void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
+};
+
+struct etnaviv_cmdbuf_suballoc;
+struct regulator;
+struct clk;
+
+#define ETNA_NR_EVENTS 30
+
+struct etnaviv_gpu {
+ struct drm_device *drm;
+ struct thermal_cooling_device *cooling;
+ struct device *dev;
+ struct mutex lock;
+ struct etnaviv_chip_identity identity;
+ enum etnaviv_sec_mode sec_mode;
+ struct workqueue_struct *wq;
+ struct drm_gpu_scheduler sched;
+ bool initialized;
+ bool fe_running;
+
+ /* 'ring'-buffer: */
+ struct etnaviv_cmdbuf buffer;
+ int exec_state;
+
+ /* event management: */
+ DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
+ struct etnaviv_event event[ETNA_NR_EVENTS];
+ struct completion event_free;
+ spinlock_t event_spinlock;
+
+ u32 idle_mask;
+
+ /* Fencing support */
+ struct mutex fence_lock;
+ struct idr fence_idr;
+ u32 next_fence;
+ u32 completed_fence;
+ wait_queue_head_t fence_event;
+ u64 fence_context;
+ spinlock_t fence_spinlock;
+
+ /* worker for handling 'sync' points: */
+ struct work_struct sync_point_work;
+ int sync_point_event;
+
+ /* hang detection */
+ u32 hangcheck_dma_addr;
+ u32 hangcheck_fence;
+
+ void __iomem *mmio;
+ int irq;
+
+ struct etnaviv_iommu_context *mmu_context;
+ unsigned int flush_seq;
+
+ /* Power Control: */
+ struct clk *clk_bus;
+ struct clk *clk_reg;
+ struct clk *clk_core;
+ struct clk *clk_shader;
+
+ unsigned int freq_scale;
+ unsigned long base_rate_core;
+ unsigned long base_rate_shader;
+};
+
+static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
+{
+ writel(data, gpu->mmio + reg);
+}
+
+static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
+{
+ return readl(gpu->mmio + reg);
+}
+
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
+
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
+
+#ifdef CONFIG_DEBUG_FS
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
+#endif
+
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
+void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
+ u32 fence, struct drm_etnaviv_timespec *timeout);
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout);
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
+
+extern struct platform_driver etnaviv_gpu_driver;
+
+#endif /* __ETNAVIV_GPU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
new file mode 100644
index 000000000..f2fc645c7
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Etnaviv Project
+ */
+
+#include "etnaviv_gpu.h"
+
+static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ {
+ .model = 0x400,
+ .revision = 0x4652,
+ .product_id = 0x70001,
+ .customer_id = 0x100,
+ .eco_id = 0,
+ .stream_count = 4,
+ .register_max = 64,
+ .thread_count = 128,
+ .shader_core_count = 1,
+ .vertex_cache_size = 8,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 256,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 8,
+ .features = 0xa0e9e004,
+ .minor_features0 = 0xe1299fff,
+ .minor_features1 = 0xbe13b219,
+ .minor_features2 = 0xce110010,
+ .minor_features3 = 0x8000001,
+ .minor_features4 = 0x20102,
+ .minor_features5 = 0x120000,
+ .minor_features6 = 0x0,
+ .minor_features7 = 0x0,
+ .minor_features8 = 0x0,
+ .minor_features9 = 0x0,
+ .minor_features10 = 0x0,
+ .minor_features11 = 0x0,
+ },
+ {
+ .model = 0x7000,
+ .revision = 0x6202,
+ .product_id = 0x70003,
+ .customer_id = 0,
+ .eco_id = 0,
+ .stream_count = 8,
+ .register_max = 64,
+ .thread_count = 512,
+ .shader_core_count = 2,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cad,
+ .minor_features0 = 0xc1489eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0x3b5ac333,
+ .minor_features6 = 0xfccee201,
+ .minor_features7 = 0x03fffa6f,
+ .minor_features8 = 0x00e10ef0,
+ .minor_features9 = 0x0088003c,
+ .minor_features10 = 0x00004040,
+ .minor_features11 = 0x00000024,
+ },
+ {
+ .model = 0x7000,
+ .revision = 0x6204,
+ .product_id = ~0U,
+ .customer_id = ~0U,
+ .eco_id = 0,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 512,
+ .shader_core_count = 2,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287c8d,
+ .minor_features0 = 0xc1589eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0x3b5ac333,
+ .minor_features6 = 0xfcce6000,
+ .minor_features7 = 0xfffbfa6f,
+ .minor_features8 = 0x00e10ef3,
+ .minor_features9 = 0x04c8003c,
+ .minor_features10 = 0x00004060,
+ .minor_features11 = 0x00000024,
+ },
+ {
+ .model = 0x7000,
+ .revision = 0x6214,
+ .product_id = ~0U,
+ .customer_id = ~0U,
+ .eco_id = ~0U,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 1024,
+ .shader_core_count = 4,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 2,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cad,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0xbb5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fbfa6f,
+ .minor_features8 = 0x00ef0ef0,
+ .minor_features9 = 0x0edbf03c,
+ .minor_features10 = 0x90044250,
+ .minor_features11 = 0x00000024,
+ },
+};
+
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_chip_identity *ident = &gpu->identity;
+ int i;
+
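+	/* Table fields set to ~0U act as wildcards and match any value. */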
+ for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+ if (etnaviv_chip_identities[i].model == ident->model &&
+ etnaviv_chip_identities[i].revision == ident->revision &&
+ (etnaviv_chip_identities[i].product_id == ident->product_id ||
+ etnaviv_chip_identities[i].product_id == ~0U) &&
+ (etnaviv_chip_identities[i].customer_id == ident->customer_id ||
+ etnaviv_chip_identities[i].customer_id == ~0U) &&
+ (etnaviv_chip_identities[i].eco_id == ident->eco_id ||
+ etnaviv_chip_identities[i].eco_id == ~0U)) {
+ memcpy(ident, &etnaviv_chip_identities[i],
+ sizeof(*ident));
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
new file mode 100644
index 000000000..afe5dd6a9
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Etnaviv Project
+ */
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "state_hi.xml.h"
+
+#define PT_SIZE SZ_2M
+#define PT_ENTRIES (PT_SIZE / sizeof(u32))
+
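+/* Start of the GPU virtual address range covered by the MMUv1 page table. */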
+#define GPU_MEM_START 0x80000000
+
+struct etnaviv_iommuv1_context {
+ struct etnaviv_iommu_context base;
+ u32 *pgtable_cpu;
+ dma_addr_t pgtable_dma;
+};
+
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
+{
+ return container_of(context, struct etnaviv_iommuv1_context, base);
+}
+
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+
+ drm_mm_takedown(&context->mm);
+
+ dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+ v1_context->pgtable_dma);
+
+ context->global->v1.shared_context = NULL;
+
+ kfree(v1_context);
+}
+
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ v1_context->pgtable_cpu[index] = paddr;
+
+ return 0;
+}
+
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
+ unsigned long iova, size_t size)
+{
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
+
+ return SZ_4K;
+}
+
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
+{
+ return PT_SIZE;
+}
+
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+ void *buf)
+{
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+
+ memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
+}
+
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+ u32 pgtable;
+
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
+ /* set base addresses */
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);
+
+ /* set page table address in MC */
+ pgtable = (u32)v1_context->pgtable_dma;
+
+ gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
+ gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
+}
+
+
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_free,
+ .map = etnaviv_iommuv1_map,
+ .unmap = etnaviv_iommuv1_unmap,
+ .dump_size = etnaviv_iommuv1_dump_size,
+ .dump = etnaviv_iommuv1_dump,
+ .restore = etnaviv_iommuv1_restore,
+};
+
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
+{
+ struct etnaviv_iommuv1_context *v1_context;
+ struct etnaviv_iommu_context *context;
+
+ mutex_lock(&global->lock);
+
+ /*
+ * MMUv1 does not support switching between different contexts without
+ * a stop the world operation, so we only support a single shared
+ * context with this version.
+ */
+ if (global->v1.shared_context) {
+ context = global->v1.shared_context;
+ etnaviv_iommu_context_get(context);
+ mutex_unlock(&global->lock);
+ return context;
+ }
+
+ v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+ if (!v1_context) {
+ mutex_unlock(&global->lock);
+ return NULL;
+ }
+
+ v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+ &v1_context->pgtable_dma,
+ GFP_KERNEL);
+ if (!v1_context->pgtable_cpu)
+ goto out_free;
+
+ memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
+
+ context = &v1_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+ context->global->v1.shared_context = context;
+
+ mutex_unlock(&global->lock);
+
+ return context;
+
+out_free:
+ mutex_unlock(&global->lock);
+ kfree(v1_context);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
new file mode 100644
index 000000000..d664ae29a
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Etnaviv Project
+ */
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "state.xml.h"
+#include "state_hi.xml.h"
+
+#define MMUv2_PTE_PRESENT BIT(0)
+#define MMUv2_PTE_EXCEPTION BIT(1)
+#define MMUv2_PTE_WRITEABLE BIT(2)
+
+#define MMUv2_MTLB_MASK 0xffc00000
+#define MMUv2_MTLB_SHIFT 22
+#define MMUv2_STLB_MASK 0x003ff000
+#define MMUv2_STLB_SHIFT 12
+
+#define MMUv2_MAX_STLB_ENTRIES 1024
+
+struct etnaviv_iommuv2_context {
+ struct etnaviv_iommu_context base;
+ unsigned short id;
+ /* M(aster) TLB aka first level pagetable */
+ u32 *mtlb_cpu;
+ dma_addr_t mtlb_dma;
+ /* S(lave) TLB aka second level pagetable */
+ u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
+ dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
+};
+
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
+{
+ return container_of(context, struct etnaviv_iommuv2_context, base);
+}
+
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ int i;
+
+ drm_mm_takedown(&context->mm);
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ if (v2_context->stlb_cpu[i])
+ dma_free_wc(context->global->dev, SZ_4K,
+ v2_context->stlb_cpu[i],
+ v2_context->stlb_dma[i]);
+ }
+
+ dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+ v2_context->mtlb_dma);
+
+ clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+ vfree(v2_context);
+}
+static int
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
+ int stlb)
+{
+ if (v2_context->stlb_cpu[stlb])
+ return 0;
+
+ v2_context->stlb_cpu[stlb] =
+ dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+ &v2_context->stlb_dma[stlb],
+ GFP_KERNEL);
+
+ if (!v2_context->stlb_cpu[stlb])
+ return -ENOMEM;
+
+ memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+ SZ_4K / sizeof(u32));
+
+ v2_context->mtlb_cpu[stlb] =
+ v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
+ return 0;
+}
+
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ int mtlb_entry, stlb_entry, ret;
+ u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ entry |= (upper_32_bits(paddr) & 0xff) << 4;
+
+ if (prot & ETNAVIV_PROT_WRITE)
+ entry |= MMUv2_PTE_WRITEABLE;
+
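+	/* Split the iova into first (MTLB) and second (STLB) level indices. */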
+ mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+ stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+ ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
+ if (ret)
+ return ret;
+
+ v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+
+ return 0;
+}
+
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
+ unsigned long iova, size_t size)
+{
+ struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
+ int mtlb_entry, stlb_entry;
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+ stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+ etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;
+
+ return SZ_4K;
+}
+
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ size_t dump_size = SZ_4K;
+ int i;
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ dump_size += SZ_4K;
+
+ return dump_size;
+}
+
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ int i;
+
+ memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
+ buf += SZ_4K;
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
+ memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+ buf += SZ_4K;
+ }
+}
+
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ u16 prefetch;
+
+ /* If the MMU is already enabled the state is still there. */
+ if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+ return;
+
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
+ prefetch = etnaviv_buffer_config_mmuv2(gpu,
+ (u32)v2_context->mtlb_dma,
+ (u32)context->global->bad_page_dma);
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+ prefetch);
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
+}
+
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ u16 prefetch;
+
+ /* If the MMU is already enabled the state is still there. */
+ if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+ return;
+
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+ lower_32_bits(context->global->v2.pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+ upper_32_bits(context->global->v2.pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
+
+ gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
+ lower_32_bits(context->global->bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
+ lower_32_bits(context->global->bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(context->global->bad_page_dma)) |
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(context->global->bad_page_dma)));
+
+ context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+
+ /* trigger a PTA load through the FE */
+ prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+ prefetch);
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
+}
+
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->mtlb_dma;
+}
+
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->id;
+}
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ switch (gpu->sec_mode) {
+ case ETNA_SEC_NONE:
+ etnaviv_iommuv2_restore_nonsec(gpu, context);
+ break;
+ case ETNA_SEC_KERNEL:
+ etnaviv_iommuv2_restore_sec(gpu, context);
+ break;
+ default:
+ WARN(1, "unhandled GPU security mode\n");
+ break;
+ }
+}
+
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+ .free = etnaviv_iommuv2_free,
+ .map = etnaviv_iommuv2_map,
+ .unmap = etnaviv_iommuv2_unmap,
+ .dump_size = etnaviv_iommuv2_dump_size,
+ .dump = etnaviv_iommuv2_dump,
+ .restore = etnaviv_iommuv2_restore,
+};
+
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
+{
+ struct etnaviv_iommuv2_context *v2_context;
+ struct etnaviv_iommu_context *context;
+
+ v2_context = vzalloc(sizeof(*v2_context));
+ if (!v2_context)
+ return NULL;
+
+ mutex_lock(&global->lock);
+ v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+ ETNAVIV_PTA_ENTRIES);
+ if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+ set_bit(v2_context->id, global->v2.pta_alloc);
+ } else {
+ mutex_unlock(&global->lock);
+ goto out_free;
+ }
+ mutex_unlock(&global->lock);
+
+ v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+ &v2_context->mtlb_dma, GFP_KERNEL);
+ if (!v2_context->mtlb_cpu)
+ goto out_free_id;
+
+ memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+ MMUv2_MAX_STLB_ENTRIES);
+
+ global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;
+
+ context = &v2_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
+
+ return context;
+
+out_free_id:
+ clear_bit(v2_context->id, global->v2.pta_alloc);
+out_free:
+ vfree(v2_context);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
new file mode 100644
index 000000000..67bdce532
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include "common.xml.h"
+#include "etnaviv_cmdbuf.h"
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
+ unsigned long iova, size_t size)
+{
+ size_t unmapped_page, unmapped = 0;
+ size_t pgsize = SZ_4K;
+
+ if (!IS_ALIGNED(iova | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
+ iova, size, pgsize);
+ return;
+ }
+
+ while (unmapped < size) {
+ unmapped_page = context->global->ops->unmap(context, iova,
+ pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+}
+
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ unsigned long orig_iova = iova;
+ size_t pgsize = SZ_4K;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
+ iova, &paddr, size, pgsize);
+ return -EINVAL;
+ }
+
+ while (size) {
+ ret = context->global->ops->map(context, iova, paddr, pgsize,
+ prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ etnaviv_context_unmap(context, orig_iova, orig_size - size);
+
+ return ret;
+}
+
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+	struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i;
+ int ret;
+
+ if (!context || !sgt)
+ return -EINVAL;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ phys_addr_t pa = sg_dma_address(sg) - sg->offset;
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+
+ VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ context->flush_seq++;
+
+ return 0;
+
+fail:
+ etnaviv_context_unmap(context, iova, da - iova);
+ return ret;
+}
+
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ size_t bytes = sg_dma_len(sg) + sg->offset;
+
+ etnaviv_context_unmap(context, da, bytes);
+
+ VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+ BUG_ON(!PAGE_ALIGNED(bytes));
+
+ da += bytes;
+ }
+
+ context->flush_seq++;
+}
+
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+ lockdep_assert_held(&context->lock);
+
+ etnaviv_iommu_unmap(context, mapping->vram_node.start,
+ etnaviv_obj->sgt, etnaviv_obj->base.size);
+ drm_mm_remove_node(&mapping->vram_node);
+}
+
+void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_iommu_context *context = mapping->context;
+
+ lockdep_assert_held(&context->lock);
+ WARN_ON(mapping->use);
+
+ etnaviv_iommu_remove_mapping(context, mapping);
+ etnaviv_iommu_context_put(mapping->context);
+ mapping->context = NULL;
+ list_del_init(&mapping->mmu_node);
+}
+
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size)
+{
+ struct etnaviv_vram_mapping *free = NULL;
+ enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
+ int ret;
+
+ lockdep_assert_held(&context->lock);
+
+ while (1) {
+ struct etnaviv_vram_mapping *m, *n;
+ struct drm_mm_scan scan;
+ struct list_head list;
+ bool found;
+
+ ret = drm_mm_insert_node_in_range(&context->mm, node,
+ size, 0, 0, 0, U64_MAX, mode);
+ if (ret != -ENOSPC)
+ break;
+
+ /* Try to retire some entries */
+ drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
+
+		found = false;
+ INIT_LIST_HEAD(&list);
+ list_for_each_entry(free, &context->mappings, mmu_node) {
+ /* If this vram node has not been used, skip this. */
+ if (!free->vram_node.mm)
+ continue;
+
+ /*
+ * If the iova is pinned, then it's in-use,
+ * so we must keep its mapping.
+ */
+ if (free->use)
+ continue;
+
+ list_add(&free->scan_node, &list);
+ if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Nothing found, clean up and fail */
+ list_for_each_entry_safe(m, n, &list, scan_node)
+ BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
+ break;
+ }
+
+ /*
+ * drm_mm does not allow any other operations while
+ * scanning, so we have to remove all blocks first.
+ * If drm_mm_scan_remove_block() returns false, we
+ * can leave the block pinned.
+ */
+ list_for_each_entry_safe(m, n, &list, scan_node)
+ if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
+ list_del_init(&m->scan_node);
+
+ /*
+ * Unmap the blocks which need to be reaped from the MMU.
+	 * Clear the mmu pointer to prevent mapping_get from finding
+	 * this mapping.
+ */
+ list_for_each_entry_safe(m, n, &list, scan_node) {
+ etnaviv_iommu_reap_mapping(m);
+ list_del_init(&m->scan_node);
+ }
+
+ mode = DRM_MM_INSERT_EVICT;
+
+ /*
+ * We removed enough mappings so that the new allocation will
+ * succeed, retry the allocation one more time.
+ */
+ }
+
+ return ret;
+}
+
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size, u64 va)
+{
+ struct etnaviv_vram_mapping *m, *n;
+ struct drm_mm_node *scan_node;
+ LIST_HEAD(scan_list);
+ int ret;
+
+ lockdep_assert_held(&context->lock);
+
+ ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /*
+	 * When we can't insert the node, due to an existing mapping blocking
+ * the address space, there are two possible reasons:
+ * 1. Userspace genuinely messed up and tried to reuse address space
+ * before the last job using this VMA has finished executing.
+ * 2. The existing buffer mappings are idle, but the buffers are not
+ * destroyed yet (likely due to being referenced by another context) in
+ * which case the mappings will not be cleaned up and we must reap them
+ * here to make space for the new mapping.
+ */
+
+ drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
+ m = container_of(scan_node, struct etnaviv_vram_mapping,
+ vram_node);
+
+ if (m->use)
+ return -ENOSPC;
+
+ list_add(&m->scan_node, &scan_list);
+ }
+
+ list_for_each_entry_safe(m, n, &scan_list, scan_node) {
+ etnaviv_iommu_reap_mapping(m);
+ list_del_init(&m->scan_node);
+ }
+
+ return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping, u64 va)
+{
+ struct sg_table *sgt = etnaviv_obj->sgt;
+ struct drm_mm_node *node;
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ mutex_lock(&context->lock);
+
+ /* v1 MMU can optimize single entry (contiguous) scatterlists */
+ if (context->global->version == ETNAVIV_IOMMU_V1 &&
+ sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+ u32 iova;
+
+ iova = sg_dma_address(sgt->sgl) - memory_base;
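+		/* Only use the linear window if the whole buffer fits below 2 GiB. */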
+ if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+ mapping->iova = iova;
+ mapping->context = etnaviv_iommu_context_get(context);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ ret = 0;
+ goto unlock;
+ }
+ }
+
+ node = &mapping->vram_node;
+
+ if (va)
+ ret = etnaviv_iommu_insert_exact(context, node,
+ etnaviv_obj->base.size, va);
+ else
+ ret = etnaviv_iommu_find_iova(context, node,
+ etnaviv_obj->base.size);
+ if (ret < 0)
+ goto unlock;
+
+ mapping->iova = node->start;
+ ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
+ ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
+
+ if (ret < 0) {
+ drm_mm_remove_node(node);
+ goto unlock;
+ }
+
+ mapping->context = etnaviv_iommu_context_get(context);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+unlock:
+ mutex_unlock(&context->lock);
+
+ return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ WARN_ON(mapping->use);
+
+ mutex_lock(&context->lock);
+
+ /* Bail if the mapping has been reaped by another thread */
+ if (!mapping->context) {
+ mutex_unlock(&context->lock);
+ return;
+ }
+
+ /* If the vram node is on the mm, unmap and remove the node */
+ if (mapping->vram_node.mm == &context->mm)
+ etnaviv_iommu_remove_mapping(context, mapping);
+
+ list_del(&mapping->mmu_node);
+ mutex_unlock(&context->lock);
+ etnaviv_iommu_context_put(context);
+}
+
+static void etnaviv_iommu_context_free(struct kref *kref)
+{
+ struct etnaviv_iommu_context *context =
+ container_of(kref, struct etnaviv_iommu_context, refcount);
+
+ etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+
+ context->global->ops->free(context);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
+{
+ kref_put(&context->refcount, etnaviv_iommu_context_free);
+}
+
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc)
+{
+ struct etnaviv_iommu_context *ctx;
+ int ret;
+
+ if (global->version == ETNAVIV_IOMMU_V1)
+ ctx = etnaviv_iommuv1_context_alloc(global);
+ else
+ ctx = etnaviv_iommuv2_context_alloc(global);
+
+ if (!ctx)
+ return NULL;
+
+ ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+ global->memory_base);
+ if (ret)
+ goto out_free;
+
+ if (global->version == ETNAVIV_IOMMU_V1 &&
+ ctx->cmdbuf_mapping.iova > 0x80000000) {
+ dev_err(global->dev,
+ "command buffer outside valid memory window\n");
+ goto out_unmap;
+ }
+
+ return ctx;
+
+out_unmap:
+ etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+ global->ops->free(ctx);
+ return NULL;
+}
+
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
+{
+ context->global->ops->restore(gpu, context);
+}
+
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size)
+{
+ mutex_lock(&context->lock);
+
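+	/*
+	 * The suballoc mapping is refcounted: if it is already mapped,
+	 * just take another reference.
+	 */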
+ if (mapping->use > 0) {
+ mapping->use++;
+ mutex_unlock(&context->lock);
+ return 0;
+ }
+
+ /*
+ * For MMUv1 we don't add the suballoc region to the pagetables, as
+ * those GPUs can only work with cmdbufs accessed through the linear
+ * window. Instead we manufacture a mapping to make it look uniform
+ * to the upper layers.
+ */
+ if (context->global->version == ETNAVIV_IOMMU_V1) {
+ mapping->iova = paddr - memory_base;
+ } else {
+ struct drm_mm_node *node = &mapping->vram_node;
+ int ret;
+
+ ret = etnaviv_iommu_find_iova(context, node, size);
+ if (ret < 0) {
+ mutex_unlock(&context->lock);
+ return ret;
+ }
+
+ mapping->iova = node->start;
+ ret = etnaviv_context_map(context, node->start, paddr, size,
+ ETNAVIV_PROT_READ);
+ if (ret < 0) {
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
+ return ret;
+ }
+
+ context->flush_seq++;
+ }
+
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ mapping->use = 1;
+
+ mutex_unlock(&context->lock);
+
+ return 0;
+}
+
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ struct drm_mm_node *node = &mapping->vram_node;
+
+ mutex_lock(&context->lock);
+ mapping->use--;
+
+ if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+ mutex_unlock(&context->lock);
+ return;
+ }
+
+ etnaviv_context_unmap(context, node->start, node->size);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
+{
+ return context->global->ops->dump_size(context);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
+{
+ context->global->ops->dump(context, buf);
+}
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
+{
+ enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global;
+ struct device *dev = gpu->drm->dev;
+
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+ version = ETNAVIV_IOMMU_V2;
+
+ if (priv->mmu_global) {
+ if (priv->mmu_global->version != version) {
+ dev_err(gpu->dev,
+ "MMU version doesn't match global version\n");
+ return -ENXIO;
+ }
+
+ priv->mmu_global->use++;
+ return 0;
+ }
+
+ global = kzalloc(sizeof(*global), GFP_KERNEL);
+ if (!global)
+ return -ENOMEM;
+
+ global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+ GFP_KERNEL);
+ if (!global->bad_page_cpu)
+ goto free_global;
+
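+	/* fill the bad page with a recognizable pattern, so stray accesses stand out in dumps */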
+ memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+ if (version == ETNAVIV_IOMMU_V2) {
+ global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+ &global->v2.pta_dma, GFP_KERNEL);
+ if (!global->v2.pta_cpu)
+ goto free_bad_page;
+ }
+
+ global->dev = dev;
+ global->version = version;
+ global->use = 1;
+ mutex_init(&global->lock);
+
+ if (version == ETNAVIV_IOMMU_V1)
+ global->ops = &etnaviv_iommuv1_ops;
+ else
+ global->ops = &etnaviv_iommuv2_ops;
+
+ priv->mmu_global = global;
+
+ return 0;
+
+free_bad_page:
+ dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+ kfree(global);
+
+ return -ENOMEM;
+}
+
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global = priv->mmu_global;
+
+ if (--global->use > 0)
+ return;
+
+ if (global->v2.pta_cpu)
+ dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+ global->v2.pta_cpu, global->v2.pta_dma);
+
+ if (global->bad_page_cpu)
+ dma_free_wc(global->dev, SZ_4K,
+ global->bad_page_cpu, global->bad_page_dma);
+
+ mutex_destroy(&global->lock);
+ kfree(global);
+
+ priv->mmu_global = NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 000000000..c01a147f0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2018 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_MMU_H__
+#define __ETNAVIV_MMU_H__
+
+#define ETNAVIV_PROT_READ (1 << 0)
+#define ETNAVIV_PROT_WRITE (1 << 1)
+
+enum etnaviv_iommu_version {
+ ETNAVIV_IOMMU_V1 = 0,
+ ETNAVIV_IOMMU_V2,
+};
+
+struct etnaviv_gpu;
+struct etnaviv_vram_mapping;
+struct etnaviv_iommu_global;
+struct etnaviv_iommu_context;
+
+struct etnaviv_iommu_ops {
+ struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
+ void (*free)(struct etnaviv_iommu_context *);
+ int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
+ size_t size);
+ size_t (*dump_size)(struct etnaviv_iommu_context *);
+ void (*dump)(struct etnaviv_iommu_context *, void *);
+ void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
+};
+
+extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
+extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
+
+#define ETNAVIV_PTA_SIZE SZ_4K
+#define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64))
+
+struct etnaviv_iommu_global {
+ struct device *dev;
+ enum etnaviv_iommu_version version;
+ const struct etnaviv_iommu_ops *ops;
+ unsigned int use;
+ struct mutex lock;
+
+ void *bad_page_cpu;
+ dma_addr_t bad_page_dma;
+
+ u32 memory_base;
+
+ /*
+	 * This union holds the members needed by either MMUv1 or MMUv2,
+	 * which cannot both be in use at the same time.
+ */
+ union {
+ struct {
+ struct etnaviv_iommu_context *shared_context;
+ } v1;
+ struct {
+ /* P(age) T(able) A(rray) */
+ u64 *pta_cpu;
+ dma_addr_t pta_dma;
+ struct spinlock pta_lock;
+ DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
+ } v2;
+ };
+};
+
+struct etnaviv_iommu_context {
+ struct kref refcount;
+ struct etnaviv_iommu_global *global;
+
+ /* memory manager for GPU address area */
+ struct mutex lock;
+ struct list_head mappings;
+ struct drm_mm mm;
+ unsigned int flush_seq;
+
+ /* Not part of the context, but needs to have the same lifetime */
+ struct etnaviv_vram_mapping cmdbuf_mapping;
+};
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);
+
+struct etnaviv_gem_object;
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping, u64 va);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping);
+
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc);
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+{
+ kref_get(&ctx->refcount);
+ return ctx;
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *ctx);
+
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
+
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context);
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context);
+
+#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
new file mode 100644
index 000000000..bafdfe49c
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include "common.xml.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_perfmon.h"
+#include "state_hi.xml.h"
+
+struct etnaviv_pm_domain;
+
+struct etnaviv_pm_signal {
+ char name[64];
+ u32 data;
+
+ u32 (*sample)(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal);
+};
+
+struct etnaviv_pm_domain {
+ char name[64];
+
+ /* profile register */
+ u32 profile_read;
+ u32 profile_config;
+
+ u8 nr_signals;
+ const struct etnaviv_pm_signal *signal;
+};
+
+struct etnaviv_pm_domain_meta {
+ unsigned int feature;
+ const struct etnaviv_pm_domain *domains;
+ u32 nr_domains;
+};
+
+static u32 perf_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ gpu_write(gpu, domain->profile_config, signal->data);
+
+ return gpu_read(gpu, domain->profile_read);
+}
+
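+/*
+ * Select which pixel pipe's debug/profiling registers are visible by
+ * programming the DEBUG_PIXEL_PIPE field of the clock control register.
+ */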
+static inline void pipe_select(struct etnaviv_gpu *gpu, u32 clock, unsigned pipe)
+{
+ clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
+ clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(pipe);
+
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+}
+
+static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ u32 value = 0;
+ unsigned i;
+
+ for (i = 0; i < gpu->identity.pixel_pipes; i++) {
+ pipe_select(gpu, clock, i);
+ value += perf_reg_read(gpu, domain, signal);
+ }
+
+ /* switch back to pixel pipe 0 to prevent GPU hang */
+ pipe_select(gpu, clock, 0);
+
+ return value;
+}
+
+static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ u32 value = 0;
+ unsigned i;
+
+ for (i = 0; i < gpu->identity.pixel_pipes; i++) {
+ pipe_select(gpu, clock, i);
+ value += gpu_read(gpu, signal->data);
+ }
+
+ /* switch back to pixel pipe 0 to prevent GPU hang */
+ pipe_select(gpu, clock, 0);
+
+ return value;
+}
+
+static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
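+	/* these cores report the total cycle count through the MC block instead */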
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
+
+ return gpu_read(gpu, reg);
+}
+
+static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ return gpu_read(gpu, reg);
+}
+
+static const struct etnaviv_pm_domain doms_3d[] = {
+ {
+ .name = "HI",
+ .profile_read = VIVS_MC_PROFILE_HI_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 7,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_READ_BYTES8",
+ VIVS_HI_PROFILE_READ_BYTES8,
+ &pipe_reg_read,
+ },
+ {
+ "TOTAL_WRITE_BYTES8",
+ VIVS_HI_PROFILE_WRITE_BYTES8,
+ &pipe_reg_read,
+ },
+ {
+ "TOTAL_CYCLES",
+ 0,
+ &hi_total_cycle_read
+ },
+ {
+ "IDLE_CYCLES",
+ 0,
+ &hi_total_idle_cycle_read
+ },
+ {
+ "AXI_CYCLES_READ_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_DATA_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 4,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
+ &pipe_perf_reg_read
+ },
+ {
+ "PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
+ &pipe_perf_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
+ &pipe_perf_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
+ &pipe_perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "SH",
+ .profile_read = VIVS_MC_PROFILE_SH_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "SHADER_CYCLES",
+ VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
+ &perf_reg_read
+ },
+ {
+ "PS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "RENDERED_PIXEL_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "VS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "RENDERED_VERTICE_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "VTX_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "VTX_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "PXL_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "PXL_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
+ &pipe_perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "PA",
+ .profile_read = VIVS_MC_PROFILE_PA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 6,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "INPUT_VTX_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "INPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "OUTPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "DEPTH_CLIPPED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "TRIVIAL_REJECTED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
+ &pipe_perf_reg_read
+ },
+ {
+ "CULLED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
+ &pipe_perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "SE",
+ .profile_read = VIVS_MC_PROFILE_SE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 2,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "CULLED_TRIANGLE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CULLED_LINES_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "RA",
+ .profile_read = VIVS_MC_PROFILE_RA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 7,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "VALID_PIXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
+ &perf_reg_read
+ },
+ {
+ "VALID_QUAD_COUNT_AFTER_EARLY_Z",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_PRIMITIVE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "PIPE_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "PREFETCH_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "CULLED_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "TX",
+ .profile_read = VIVS_MC_PROFILE_TX_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_BILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TRILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_DISCARDED_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_IN_8B_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_HIT_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "MC",
+ .profile_read = VIVS_MC_PROFILE_MC_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 3,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_READ_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_READ_REQ_8B_FROM_IP",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_2d[] = {
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 1,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXELS_RENDERED_2D",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
+ &pipe_perf_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_vg[] = {
+};
+
+static const struct etnaviv_pm_domain_meta doms_meta[] = {
+ {
+ .feature = chipFeatures_PIPE_3D,
+ .nr_domains = ARRAY_SIZE(doms_3d),
+ .domains = &doms_3d[0]
+ },
+ {
+ .feature = chipFeatures_PIPE_2D,
+ .nr_domains = ARRAY_SIZE(doms_2d),
+ .domains = &doms_2d[0]
+ },
+ {
+ .feature = chipFeatures_PIPE_VG,
+ .nr_domains = ARRAY_SIZE(doms_vg),
+ .domains = &doms_vg[0]
+ }
+};
+
+static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
+{
+ unsigned int num = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (gpu->identity.features & meta->feature)
+ num += meta->nr_domains;
+ }
+
+ return num;
+}
+
+static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
+ unsigned int index)
+{
+ const struct etnaviv_pm_domain *domain = NULL;
+ unsigned int offset = 0, i;
+
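+	/* walk the per-feature domain arrays until the global index falls into one of them */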
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (!(gpu->identity.features & meta->feature))
+ continue;
+
+ if (index - offset >= meta->nr_domains) {
+ offset += meta->nr_domains;
+ continue;
+ }
+
+ domain = meta->domains + (index - offset);
+ }
+
+ return domain;
+}
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain)
+{
+ const unsigned int nr_domains = num_pm_domains(gpu);
+ const struct etnaviv_pm_domain *dom;
+
+ if (domain->iter >= nr_domains)
+ return -EINVAL;
+
+ dom = pm_domain(gpu, domain->iter);
+ if (!dom)
+ return -EINVAL;
+
+ domain->id = domain->iter;
+ domain->nr_signals = dom->nr_signals;
+ strncpy(domain->name, dom->name, sizeof(domain->name));
+
+ domain->iter++;
+ if (domain->iter == nr_domains)
+ domain->iter = 0xff;
+
+ return 0;
+}
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal)
+{
+ const unsigned int nr_domains = num_pm_domains(gpu);
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+
+ if (signal->domain >= nr_domains)
+ return -EINVAL;
+
+ dom = pm_domain(gpu, signal->domain);
+ if (!dom)
+ return -EINVAL;
+
+ if (signal->iter >= dom->nr_signals)
+ return -EINVAL;
+
+ sig = &dom->signal[signal->iter];
+
+ signal->id = signal->iter;
+ strncpy(signal->name, sig->name, sizeof(signal->name));
+
+ signal->iter++;
+ if (signal->iter == dom->nr_signals)
+ signal->iter = 0xffff;
+
+ return 0;
+}
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
+ const struct etnaviv_pm_domain *dom;
+
+ if (r->domain >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + r->domain;
+
+ if (r->signal >= dom->nr_signals)
+ return -EINVAL;
+
+ return 0;
+}
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+ u32 *bo = pmr->bo_vma;
+ u32 val;
+
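+	/* sample the requested signal and store the result in the BO at the given offset */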
+ dom = meta->domains + pmr->domain;
+ sig = &dom->signal[pmr->signal];
+ val = sig->sample(gpu, dom, sig);
+
+ *(bo + pmr->offset) = val;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
new file mode 100644
index 000000000..4a9d508f6
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef __ETNAVIV_PERFMON_H__
+#define __ETNAVIV_PERFMON_H__
+
+struct etnaviv_gpu;
+struct drm_etnaviv_pm_domain;
+struct drm_etnaviv_pm_signal;
+
+struct etnaviv_perfmon_request {
+ u32 flags;
+ u8 domain;
+ u8 signal;
+ u32 sequence;
+
+ /* bo to store a value */
+ u32 *bo_vma;
+ u32 offset;
+};
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain);
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal);
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state);
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state);
+
+#endif /* __ETNAVIV_PERFMON_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
new file mode 100644
index 000000000..72e2553fb
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ */
+
+#include <linux/moduleparam.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_sched.h"
+#include "state.xml.h"
+
+static int etnaviv_job_hang_limit;
+module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
+static int etnaviv_hw_jobs_limit = 4;
+module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
+
+static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct dma_fence *fence = NULL;
+
+ if (likely(!sched_job->s_fence->finished.error))
+ fence = etnaviv_gpu_submit(submit);
+ else
+ dev_dbg(submit->gpu->dev, "skipping bad job\n");
+
+ return fence;
+}
+
+static enum drm_gpu_sched_stat
+etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct etnaviv_gpu *gpu = submit->gpu;
+ u32 dma_addr;
+ int change;
+
+ /* block scheduler */
+ drm_sched_stop(&gpu->sched, sched_job);
+
+ /*
+	 * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(submit->out_fence))
+ goto out_no_timeout;
+
+ /*
+ * If the GPU is still making forward progress on the front-end (which
+ * should never loop) we shift out the timeout to give it a chance to
+ * finish the job.
+ */
+ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ change = dma_addr - gpu->hangcheck_dma_addr;
+ if (gpu->completed_fence != gpu->hangcheck_fence ||
+ change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ gpu->hangcheck_fence = gpu->completed_fence;
+ goto out_no_timeout;
+ }
+
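+	/* blame the offending context; once marked guilty, its remaining jobs are skipped in run_job */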
+	if (sched_job)
+ drm_sched_increase_karma(sched_job);
+
+ /* get the GPU back into the init state */
+ etnaviv_core_dump(submit);
+ etnaviv_gpu_recover_hang(gpu);
+
+ drm_sched_resubmit_jobs(&gpu->sched);
+
+ drm_sched_start(&gpu->sched, true);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+
+out_no_timeout:
+ /* restart scheduler after GPU is usable again */
+ drm_sched_start(&gpu->sched, true);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ etnaviv_submit_put(submit);
+}
+
+static const struct drm_sched_backend_ops etnaviv_sched_ops = {
+ .run_job = etnaviv_sched_run_job,
+ .timedout_job = etnaviv_sched_timedout_job,
+ .free_job = etnaviv_sched_free_job,
+};
+
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
+{
+ int ret = 0;
+
+ /*
+ * Hold the fence lock across the whole operation to avoid jobs being
+ * pushed out of order with regard to their sched fence seqnos as
+ * allocated in drm_sched_job_arm.
+ */
+ mutex_lock(&submit->gpu->fence_lock);
+
+ drm_sched_job_arm(&submit->sched_job);
+
+ submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
+ submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+ submit->out_fence, 0,
+ INT_MAX, GFP_KERNEL);
+ if (submit->out_fence_id < 0) {
+ drm_sched_job_cleanup(&submit->sched_job);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* the scheduler holds on to the job now */
+ kref_get(&submit->refcount);
+
+ drm_sched_entity_push_job(&submit->sched_job);
+
+out_unlock:
+ mutex_unlock(&submit->gpu->fence_lock);
+
+ return ret;
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+ etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
+ msecs_to_jiffies(500), NULL, NULL,
+ dev_name(gpu->dev), gpu->dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
+{
+ drm_sched_fini(&gpu->sched);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
new file mode 100644
index 000000000..baebfa069
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ */
+
+#ifndef __ETNAVIV_SCHED_H__
+#define __ETNAVIV_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct etnaviv_gpu;
+
+static inline
+struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu);
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
+int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
+
+#endif /* __ETNAVIV_SCHED_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
new file mode 100644
index 000000000..421cb7cc0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -0,0 +1,502 @@
+#ifndef STATE_XML
+#define STATE_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define VARYING_COMPONENT_USE_UNUSED 0x00000000
+#define VARYING_COMPONENT_USE_USED 0x00000001
+#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
+#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
+#define FE_DATA_TYPE_BYTE 0x00000000
+#define FE_DATA_TYPE_UNSIGNED_BYTE 0x00000001
+#define FE_DATA_TYPE_SHORT 0x00000002
+#define FE_DATA_TYPE_UNSIGNED_SHORT 0x00000003
+#define FE_DATA_TYPE_INT 0x00000004
+#define FE_DATA_TYPE_UNSIGNED_INT 0x00000005
+#define FE_DATA_TYPE_FLOAT 0x00000008
+#define FE_DATA_TYPE_HALF_FLOAT 0x00000009
+#define FE_DATA_TYPE_FIXED 0x0000000b
+#define FE_DATA_TYPE_INT_10_10_10_2 0x0000000c
+#define FE_DATA_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define FE_DATA_TYPE_BYTE_I 0x0000000e
+#define FE_DATA_TYPE_SHORT_I 0x0000000f
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK 0x00ff0000
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT 16
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK)
+#define VIVS_FE 0x00000000
+
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 0xff000000
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
+
+#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640
+
+#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644
+
+#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
+#define VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART 0x00000100
+
+#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
+
+#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650
+
+#define VIVS_FE_COMMAND_ADDRESS 0x00000654
+
+#define VIVS_FE_COMMAND_CONTROL 0x00000658
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000
+
+#define VIVS_FE_DMA_STATUS 0x0000065c
+
+#define VIVS_FE_DMA_DEBUG_STATE 0x00000660
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000
+
+#define VIVS_FE_DMA_ADDRESS 0x00000664
+
+#define VIVS_FE_DMA_LOW 0x00000668
+
+#define VIVS_FE_DMA_HIGH 0x0000066c
+
+#define VIVS_FE_AUTO_FLUSH 0x00000670
+
+#define VIVS_FE_PRIMITIVE_RESTART_INDEX 0x00000674
+
+#define VIVS_FE_UNK00678 0x00000678
+
+#define VIVS_FE_UNK0067C 0x0000067c
+
+#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004
+#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008
+
+#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0))
+
+#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_FE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_FE_GENERIC_ATTRIB__LEN 0x00000010
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK006C0(i0) (0x000006c0 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00700(i0) (0x00000700 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00740(i0) (0x00000740 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_SCALE(i0) (0x00000780 + 0x4*(i0))
+
+#define VIVS_FE_HALTI5_UNK007C4 0x000007c4
+
+#define VIVS_FE_HALTI5_UNK007D0(i0) (0x000007d0 + 0x4*(i0))
+#define VIVS_FE_HALTI5_UNK007D0__ESIZE 0x00000004
+#define VIVS_FE_HALTI5_UNK007D0__LEN 0x00000002
+
+#define VIVS_FE_HALTI5_UNK007D8 0x000007d8
+
+#define VIVS_FE_DESC_START 0x000007dc
+
+#define VIVS_FE_DESC_END 0x000007e0
+
+#define VIVS_FE_DESC_AVAIL 0x000007e4
+#define VIVS_FE_DESC_AVAIL_COUNT__MASK 0x0000007f
+#define VIVS_FE_DESC_AVAIL_COUNT__SHIFT 0
+#define VIVS_FE_DESC_AVAIL_COUNT(x) (((x) << VIVS_FE_DESC_AVAIL_COUNT__SHIFT) & VIVS_FE_DESC_AVAIL_COUNT__MASK)
+
+#define VIVS_FE_FENCE_WAIT_DATA_LOW 0x000007e8
+
+#define VIVS_FE_FENCE_WAIT_DATA_HIGH 0x000007f4
+
+#define VIVS_FE_ROBUSTNESS_UNK007F8 0x000007f8
+
+#define VIVS_GL 0x00000000
+
+#define VIVS_GL_PIPE_SELECT 0x00003800
+#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001
+#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0
+#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
+
+#define VIVS_GL_EVENT 0x00003804
+#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f
+#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0
+#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
+#define VIVS_GL_EVENT_FROM_FE 0x00000020
+#define VIVS_GL_EVENT_FROM_PE 0x00000040
+#define VIVS_GL_EVENT_FROM_BLT 0x00000080
+#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
+#define VIVS_GL_EVENT_SOURCE__SHIFT 8
+#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
+
+#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
+#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK 0x30000000
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT 28
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK)
+
+#define VIVS_GL_FLUSH_CACHE 0x0000380c
+#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
+#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002
+#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004
+#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008
+#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
+#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
+#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
+#define VIVS_GL_FLUSH_CACHE_UNK10 0x00000400
+#define VIVS_GL_FLUSH_CACHE_UNK11 0x00000800
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 0x00001000
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13 0x00002000
+
+#define VIVS_GL_FLUSH_MMU 0x00003810
+#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004
+#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010
+
+#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814
+
+#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000
+
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
+
+#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
+
+#define VIVS_GL_OCCLUSION_QUERY_ADDR 0x00003824
+
+#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
+#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
+#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
+
+#define VIVS_GL_UNK0382C 0x0000382c
+
+#define VIVS_GL_OCCLUSION_QUERY_CONTROL 0x00003830
+
+#define VIVS_GL_UNK03834 0x00003834
+
+#define VIVS_GL_UNK03838 0x00003838
+
+#define VIVS_GL_API_MODE 0x0000384c
+#define VIVS_GL_API_MODE_OPENGL 0x00000000
+#define VIVS_GL_API_MODE_OPENVG 0x00000001
+#define VIVS_GL_API_MODE_OPENCL 0x00000002
+
+#define VIVS_GL_CONTEXT_POINTER 0x00003850
+
+#define VIVS_GL_UNK03854 0x00003854
+
+#define VIVS_GL_BUG_FIXES 0x00003860
+
+#define VIVS_GL_FENCE_OUT_ADDRESS 0x00003868
+
+#define VIVS_GL_FENCE_OUT_DATA_LOW 0x0000386c
+
+#define VIVS_GL_HALTI5_UNK03884 0x00003884
+
+#define VIVS_GL_HALTI5_SH_SPECIALS 0x00003888
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK 0x0000007f
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT 0
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK 0x00007f00
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT 8
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK 0x007f0000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT 16
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK 0xff000000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT 24
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK)
+
+#define VIVS_GL_GS_UNK0388C 0x0000388c
+
+#define VIVS_GL_FENCE_OUT_DATA_HIGH 0x00003898
+
+#define VIVS_GL_SHADER_INDEX 0x0000389c
+
+#define VIVS_GL_GS_UNK038A0(i0) (0x000038a0 + 0x4*(i0))
+#define VIVS_GL_GS_UNK038A0__ESIZE 0x00000004
+#define VIVS_GL_GS_UNK038A0__LEN 0x00000008
+
+#define VIVS_GL_HALTI5_UNK038C0(i0) (0x000038c0 + 0x4*(i0))
+#define VIVS_GL_HALTI5_UNK038C0__ESIZE 0x00000004
+#define VIVS_GL_HALTI5_UNK038C0__LEN 0x00000010
+
+#define VIVS_GL_SECURITY_UNK3900 0x00003900
+
+#define VIVS_GL_SECURITY_UNK3904 0x00003904
+
+#define VIVS_GL_UNK03A00 0x00003a00
+
+#define VIVS_GL_UNK03A04 0x00003a04
+
+#define VIVS_GL_UNK03A08 0x00003a08
+
+#define VIVS_GL_UNK03A0C 0x00003a0c
+
+#define VIVS_GL_UNK03A10 0x00003a10
+
+#define VIVS_GL_STALL_TOKEN 0x00003c00
+#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
+#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
+#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
+#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00
+#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8
+#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
+#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
+#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
+
+#define VIVS_NFE 0x00000000
+
+#define VIVS_NFE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_VERTEX_STREAMS__ESIZE 0x00000004
+#define VIVS_NFE_VERTEX_STREAMS__LEN 0x00000010
+
+#define VIVS_NFE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00014600 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_CONTROL(i0) (0x00014640 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_UNK14680(i0) (0x00014680 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_ROBUSTNESS_UNK146C0(i0) (0x000146c0 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_NFE_GENERIC_ATTRIB__LEN 0x00000020
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0(i0) (0x00017800 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK 0x0000000f
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK 0x00000030
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT 4
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK 0x00000700
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT 8
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK 0x00003000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT 12
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__MASK 0x0000c000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__SHIFT 14
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_OFF 0x00000000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_ON 0x00008000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK 0x00ff0000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT 16
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK)
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17880(i0) (0x00017880 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17900(i0) (0x00017900 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17980(i0) (0x00017980 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_SCALE(i0) (0x00017a00 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1(i0) (0x00017a80 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK 0x000000ff
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_NONCONSECUTIVE 0x00000800
+
+#define VIVS_DUMMY 0x00000000
+
+#define VIVS_DUMMY_DUMMY 0x0003fffc
+
+
+#endif /* STATE_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
new file mode 100644
index 000000000..ebbd4fcf3
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef STATE_3D_XML
+#define STATE_3D_XML
+
+/* This is a cut-down version of the state_3d.xml.h file */
+
+#define VIVS_TS_FLUSH_CACHE 0x00001650
+#define VIVS_TS_FLUSH_CACHE_FLUSH 0x00000001
+
+#define VIVS_NTE_DESCRIPTOR_FLUSH 0x00014c44
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK 0xf0000000
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT 28
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28(x) (((x) << VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT) & VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK)
+
+#endif /* STATE_3D_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
new file mode 100644
index 000000000..0e8bcf9dc
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -0,0 +1,54 @@
+#ifndef STATE_BLT_XML
+#define STATE_BLT_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+/* This is a cut-down version of the state_blt.xml.h file */
+
+#define VIVS_BLT_SET_COMMAND 0x000140ac
+
+#define VIVS_BLT_ENABLE 0x000140b8
+#define VIVS_BLT_ENABLE_ENABLE 0x00000001
+
+#endif /* STATE_BLT_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
new file mode 100644
index 000000000..deaaa99fa
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -0,0 +1,570 @@
+#ifndef STATE_HI_XML
+#define STATE_HI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
+- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
+- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
+- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
+- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
+- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
+- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
+- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
+- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)
+
+Copyright (C) 2012-2019 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
+#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
+#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
+#define MMU_EXCEPTION_OUT_OF_BOUND 0x00000004
+#define MMU_EXCEPTION_READ_SECURITY_VIOLATION 0x00000005
+#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION 0x00000006
+#define VIVS_HI 0x00000000
+
+#define VIVS_HI_CLOCK_CONTROL 0x00000000
+#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001
+#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800
+#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000
+#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
+
+#define VIVS_HI_IDLE_STATE 0x00000004
+#define VIVS_HI_IDLE_STATE_FE 0x00000001
+#define VIVS_HI_IDLE_STATE_DE 0x00000002
+#define VIVS_HI_IDLE_STATE_PE 0x00000004
+#define VIVS_HI_IDLE_STATE_SH 0x00000008
+#define VIVS_HI_IDLE_STATE_PA 0x00000010
+#define VIVS_HI_IDLE_STATE_SE 0x00000020
+#define VIVS_HI_IDLE_STATE_RA 0x00000040
+#define VIVS_HI_IDLE_STATE_TX 0x00000080
+#define VIVS_HI_IDLE_STATE_VG 0x00000100
+#define VIVS_HI_IDLE_STATE_IM 0x00000200
+#define VIVS_HI_IDLE_STATE_FP 0x00000400
+#define VIVS_HI_IDLE_STATE_TS 0x00000800
+#define VIVS_HI_IDLE_STATE_BL 0x00001000
+#define VIVS_HI_IDLE_STATE_ASYNCFE 0x00002000
+#define VIVS_HI_IDLE_STATE_MC 0x00004000
+#define VIVS_HI_IDLE_STATE_PPA 0x00008000
+#define VIVS_HI_IDLE_STATE_WD 0x00010000
+#define VIVS_HI_IDLE_STATE_NN 0x00020000
+#define VIVS_HI_IDLE_STATE_TP 0x00040000
+#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
+
+#define VIVS_HI_AXI_CONFIG 0x00000008
+#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f
+#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0
+#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
+#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0
+#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4
+#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
+#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00
+#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8
+#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
+#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000
+#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12
+#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
+
+#define VIVS_HI_AXI_STATUS 0x0000000c
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100
+#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
+
+#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x3fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION 0x40000000
+#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
+
+#define VIVS_HI_INTR_ENBL 0x00000014
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
+
+#define VIVS_HI_CHIP_IDENTITY 0x00000018
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24
+#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
+#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000
+#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12
+#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
+
+#define VIVS_HI_CHIP_FEATURE 0x0000001c
+
+#define VIVS_HI_CHIP_MODEL 0x00000020
+
+#define VIVS_HI_CHIP_REV 0x00000024
+
+#define VIVS_HI_CHIP_DATE 0x00000028
+
+#define VIVS_HI_CHIP_TIME 0x0000002c
+
+#define VIVS_HI_CHIP_CUSTOMER_ID 0x00000030
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
+
+#define VIVS_HI_CACHE_CONTROL 0x00000038
+
+#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c
+
+#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040
+
+#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044
+
+#define VIVS_HI_CHIP_SPECS 0x00000048
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+
+#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c
+
+#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050
+
+#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058
+
+#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c
+
+#define VIVS_HI_PROFILE_READ_LASTS 0x00000060
+
+#define VIVS_HI_GP_OUT0 0x00000064
+
+#define VIVS_HI_GP_OUT1 0x00000068
+
+#define VIVS_HI_GP_OUT2 0x0000006c
+
+#define VIVS_HI_AXI_CONTROL 0x00000070
+#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074
+
+#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078
+
+#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c
+
+#define VIVS_HI_CHIP_SPECS_2 0x00000080
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
+
+#define VIVS_HI_CHIP_SPECS_3 0x0000008c
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK 0x000001f0
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT 4
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK 0x00000007
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+
+#define VIVS_HI_COMPRESSION_FLAGS 0x00000090
+#define VIVS_HI_COMPRESSION_FLAGS_DEC300 0x00000040
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
+
+#define VIVS_HI_CHIP_SPECS_4 0x0000009c
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK 0x0001f000
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT 12
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_5 0x000000a0
+
+#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8
+
+#define VIVS_HI_BLT_INTR 0x000000d4
+
+#define VIVS_HI_CHIP_ECO_ID 0x000000e8
+
+#define VIVS_HI_AUXBIT 0x000000ec
+
+#define VIVS_PM 0x00000000
+
+#define VIVS_PM_POWER_CONTROLS 0x00000100
+#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
+
+#define VIVS_PM_MODULE_CONTROLS 0x00000104
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000
+
+#define VIVS_PM_MODULE_STATUS 0x00000108
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH 0x00000008
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA 0x00000010
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE 0x00000020
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA 0x00000040
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080
+
+#define VIVS_PM_PULSE_EATER 0x0000010c
+#define VIVS_PM_PULSE_EATER_DISABLE 0x00000001
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK 0x0000ff00
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT 8
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD(x) (((x) << VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT) & VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK)
+#define VIVS_PM_PULSE_EATER_UNK16 0x00010000
+#define VIVS_PM_PULSE_EATER_UNK17 0x00020000
+#define VIVS_PM_PULSE_EATER_INTERNAL_DFS 0x00040000
+#define VIVS_PM_PULSE_EATER_UNK19 0x00080000
+#define VIVS_PM_PULSE_EATER_UNK20 0x00100000
+#define VIVS_PM_PULSE_EATER_UNK22 0x00400000
+#define VIVS_PM_PULSE_EATER_UNK23 0x00800000
+
+#define VIVS_MMUv2 0x00000000
+
+#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180
+
+#define VIVS_MMUv2_CONFIGURATION 0x00000184
+#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
+
+#define VIVS_MMUv2_STATUS 0x00000188
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_CONTROL 0x0000018c
+#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0))
+#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
+#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
+
+#define VIVS_MMUv2_PROFILE_BLT_READ 0x000001a4
+
+#define VIVS_MMUv2_PTA_CONFIG 0x000001ac
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__MASK 0x0000ffff
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT 0
+#define VIVS_MMUv2_PTA_CONFIG_INDEX(x) (((x) << VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT) & VIVS_MMUv2_PTA_CONFIG_INDEX__MASK)
+#define VIVS_MMUv2_PTA_CONFIG_UNK16 0x00010000
+
+#define VIVS_MMUv2_AXI_POLICY(i0) (0x000001c0 + 0x4*(i0))
+#define VIVS_MMUv2_AXI_POLICY__ESIZE 0x00000004
+#define VIVS_MMUv2_AXI_POLICY__LEN 0x00000008
+
+#define VIVS_MMUv2_SEC_EXCEPTION_ADDR 0x00000380
+
+#define VIVS_MMUv2_SEC_STATUS 0x00000384
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_SEC_CONTROL 0x00000388
+#define VIVS_MMUv2_SEC_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_PTA_ADDRESS_LOW 0x0000038c
+
+#define VIVS_MMUv2_PTA_ADDRESS_HIGH 0x00000390
+
+#define VIVS_MMUv2_PTA_CONTROL 0x00000394
+#define VIVS_MMUv2_PTA_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW 0x00000398
+
+#define VIVS_MMUv2_SEC_SAFE_ADDR_LOW 0x0000039c
+
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG 0x000003a0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK 0x000000ff
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT 0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK15 0x00008000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK 0x00ff0000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT 16
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK31 0x80000000
+
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL 0x000003a4
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT 0
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE 0x00010000
+
+#define VIVS_MMUv2_AHB_CONTROL 0x000003a8
+#define VIVS_MMUv2_AHB_CONTROL_RESET 0x00000001
+#define VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS 0x00000002
+
+#define VIVS_MC 0x00000000
+
+#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
+
+#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404
+
+#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408
+
+#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c
+
+#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410
+
+#define VIVS_MC_DEBUG_MEMORY 0x00000414
+#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008
+#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000
+#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000
+
+#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418
+
+#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c
+
+#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428
+
+#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c
+
+#define VIVS_MC_MEMORY_FLUSH 0x00000430
+
+#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438
+
+#define VIVS_MC_DEBUG_READ0 0x0000043c
+
+#define VIVS_MC_DEBUG_READ1 0x00000440
+
+#define VIVS_MC_DEBUG_WRITE 0x00000444
+
+#define VIVS_MC_PROFILE_RA_READ 0x00000448
+
+#define VIVS_MC_PROFILE_TX_READ 0x0000044c
+
+#define VIVS_MC_PROFILE_FE_READ 0x00000450
+
+#define VIVS_MC_PROFILE_PE_READ 0x00000454
+
+#define VIVS_MC_PROFILE_DE_READ 0x00000458
+
+#define VIVS_MC_PROFILE_SH_READ 0x0000045c
+
+#define VIVS_MC_PROFILE_PA_READ 0x00000460
+
+#define VIVS_MC_PROFILE_SE_READ 0x00000464
+
+#define VIVS_MC_PROFILE_MC_READ 0x00000468
+
+#define VIVS_MC_PROFILE_HI_READ 0x0000046c
+
+#define VIVS_MC_PROFILE_CONFIG0 0x00000470
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
+#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x0000ff00
+#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x00ff0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0xff000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG1 0x00000474
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x000000ff
+#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
+#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005
+#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
+#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
+#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x0000ff00
+#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x00ff0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0xff000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG2 0x00000478
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x000000ff
+#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
+#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x0000ff00
+#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
+#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_BLT__MASK 0xff000000
+#define VIVS_MC_PROFILE_CONFIG2_BLT__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG2_BLT_UNK0 0x00000000
+
+#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
+
+#define VIVS_MC_BUS_CONFIG 0x00000480
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
+
+#define VIVS_MC_START_COMPOSITION 0x00000554
+
+#define VIVS_MC_FLAGS 0x00000558
+#define VIVS_MC_FLAGS_128B_MERGE 0x00000001
+#define VIVS_MC_FLAGS_TPCV11_COMPRESSION 0x08000000
+
+#define VIVS_MC_L2_CACHE_CONFIG 0x0000055c
+
+#define VIVS_MC_PROFILE_L2_READ 0x00000564
+
+
+#endif /* STATE_HI_XML */
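
Illustrative usage sketch (not part of the upstream files above): the autogenerated headers in this commit all follow the same __MASK/__SHIFT/constructor-macro pattern, where the FIELD(x) macro shifts a value into place and masks it to the field width. A minimal, self-contained C example of composing a VIVS_HI_CLOCK_CONTROL value with the FSCALE_VAL field and the FSCALE_CMD_LOAD latch bit might look like the following; the chosen scaler value and the printf stand-in for a register write are assumptions for demonstration only.

/* Sketch only: shows how the MASK/SHIFT/constructor macros compose a
 * register value. Definitions are copied from state_hi.xml.h above so the
 * example compiles standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK   0x000001fc
#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT  2
#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) \
	(((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & \
	 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD    0x00000200

int main(void)
{
	/* Place an example frequency-scaler value into bits [8:2]. */
	uint32_t clock = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(32);

	/* Setting FSCALE_CMD_LOAD alongside the value is what tells the
	 * hardware to latch the new scaler; here we only print the two
	 * words instead of writing them to the register.
	 */
	printf("VIVS_HI_CLOCK_CONTROL: 0x%08x then 0x%08x\n",
	       clock, clock | VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	return 0;
}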