summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/gt
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
commit2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/i915/gt
parentInitial commit. (diff)
downloadlinux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76.upstream/6.1.76
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c331
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.h38
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_engine_cs.c456
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_engine_cs.h39
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c467
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.h77
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_renderstate.c297
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c453
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.h15
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderstate.c261
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c781
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h138
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c994
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h22
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_renderstate.c965
-rw-r--r--drivers/gpu/drm/i915/gt/gen9_renderstate.c981
-rw-r--r--drivers/gpu/drm/i915/gt/hsw_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c520
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.h63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h55
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c631
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h372
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.h19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c97
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h316
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h357
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2392
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c373
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c305
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h111
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h258
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_stats.h61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h666
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c308
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c4186
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h42
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1330
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c926
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c132
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h459
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c310
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h40
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c1095
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h116
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c246
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h43
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c212
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c119
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.h53
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c519
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.h65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c522
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c417
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h92
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c670
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c109
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h1591
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c262
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h32
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c116
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c783
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h299
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c647
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h696
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hwconfig.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c162
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1848
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h143
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h81
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c1135
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.h66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate_types.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c680
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h41
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c323
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c821
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h30
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c287
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c254
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.h52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c1557
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h80
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c336
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h142
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c1422
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_types.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c2591
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h121
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h117
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c900
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h182
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c299
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c493
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h103
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h92
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c3031
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds_types.h29
-rw-r--r--drivers/gpu/drm/i915/gt/ivb_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c445
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.h32
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c451
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.c27
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.h13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c423
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c429
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c423
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c4521
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c218
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2060
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c71
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.h13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c1977
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c871
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c455
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c254
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.h12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c388
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring.c110
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c298
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c1319
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.h17
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c307
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c1425
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c1395
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c29
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.h17
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README46
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm119
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm117
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c170
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.h26
-rw-r--r--drivers/gpu/drm/i915/gt/st_shmem_utils.c63
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c540
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h183
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h240
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h191
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h49
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h41
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h97
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h234
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h218
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c915
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h469
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c905
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h25
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c1685
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h32
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c1250
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h120
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c87
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c203
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h500
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c164
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c930
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h105
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c175
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c81
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h31
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h155
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c750
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h46
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c5192
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c266
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c36
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c29
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c734
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h115
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c61
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c1051
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h278
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h92
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c302
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c159
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c183
207 files changed, 85353 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
new file mode 100644
index 000000000..1c82caf52
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen2_engine_cs.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_engine.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_ring.h"
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode)
+{
+ unsigned int num_store_dw = 12;
+ u32 cmd, *cs;
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE)
+ cmd |= MI_READ_FLUSH;
+
+ cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ }
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+ int i;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_EXE_FLUSH;
+ if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
+ cmd |= MI_INVALIDATE_ISP;
+ }
+
+ i = 2;
+ if (mode & EMIT_INVALIDATE)
+ i += 20;
+
+ cs = intel_ring_begin(rq, i);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+
+ /*
+ * A random delay to let the CS invalidate take effect? Without this
+ * delay, the GPU relocation path fails as the CS does not see
+ * the updated contents. Just as important, if we apply the flushes
+ * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+ * write and before the invalidate on the next batch), the relocations
+ * still fail. This implies that is a delay following invalidation
+ * that is required to reset the caches as opposed to a delay to
+ * ensure the memory is written.
+ */
+ if (mode & EMIT_INVALIDATE) {
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ for (i = 0; i < 12; i++)
+ *cs++ = MI_FLUSH;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ *cs++ = cmd;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
+ int flush, int post)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH;
+
+ while (flush--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+ *cs++ = rq->fence.seqno;
+ }
+
+ while (post--) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 16, 8);
+}
+
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return __gen2_emit_breadcrumb(rq, cs, 8, 8);
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT SZ_256K
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Evict the invalid PTE TLBs */
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ cs = intel_ring_begin(rq, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Blit the batch (which has now all relocs applied) to the
+ * stable batch scratch bo area (so that the CS never
+ * stumbles over its tlb invalidation bug) ...
+ */
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* ... and execute it. */
+ offset = cs_offset;
+ }
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 *cs;
+
+ if (!(dispatch_flags & I915_DISPATCH_SECURE))
+ offset |= MI_BATCH_NON_SECURE;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | security;
+ *cs++ = offset;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+void gen2_irq_enable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+ ENGINE_POSTING_READ16(engine, RING_IMR);
+}
+
+void gen2_irq_disable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+}
+
+void gen3_irq_enable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
+}
+
+void gen3_irq_disable(struct intel_engine_cs *engine)
+{
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+}
+
+void gen5_irq_enable(struct intel_engine_cs *engine)
+{
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen5_irq_disable(struct intel_engine_cs *engine)
+{
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
new file mode 100644
index 000000000..a5cd64a65
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN2_ENGINE_CS_H__
+#define __GEN2_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen2_emit_flush(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
+
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+
+int i830_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen3_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int gen4_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+
+void gen2_irq_enable(struct intel_engine_cs *engine);
+void gen2_irq_disable(struct intel_engine_cs *engine);
+void gen3_irq_enable(struct intel_engine_cs *engine);
+void gen3_irq_disable(struct intel_engine_cs *engine);
+void gen5_irq_enable(struct intel_engine_cs *engine);
+void gen5_irq_disable(struct intel_engine_cs *engine);
+
+#endif /* __GEN2_ENGINE_CS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
new file mode 100644
index 000000000..5e65550b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gen6_engine_cs.h"
+#include "intel_engine.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_ring.h"
+
+#define HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
+
+/*
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = gen6_emit_post_sync_nonzero_flush(rq);
+ if (ret)
+ return ret;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact. And when rearranging requests, the order of flushes is
+ * unknown.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+
+ /* Finally we can flush and with it emit the breadcrumb */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_seqno(rq) |
+ PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+static int mi_flush_dw(struct i915_request *rq, u32 flags)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ cmd |= flags;
+
+ *cs++ = cmd;
+ *cs++ = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
+{
+ return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
+}
+
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
+}
+
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
+}
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_NON_SECURE_I965;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int
+hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
+{
+ u32 security;
+ u32 *cs;
+
+ security = MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW;
+ if (dispatch_flags & I915_DISPATCH_SECURE)
+ security = 0;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = __gen6_emit_bb_start(cs, offset, security);
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen7_stall_cs(struct i915_request *rq)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+ u32 *cs, flags = 0;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /*
+ * Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+
+ /*
+ * Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set.
+ */
+ gen7_stall_cs(rq);
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = i915_request_active_seqno(rq);
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+#define GEN7_XCS_WA 32
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ for (i = 0; i < GEN7_XCS_WA; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+#undef GEN7_XCS_WA
+
+void gen6_irq_enable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void gen6_irq_disable(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ ENGINE_POSTING_READ(engine, RING_IMR);
+
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
+}
+
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.h b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
new file mode 100644
index 000000000..76c6bc9f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_ENGINE_CS_H__
+#define __GEN6_ENGINE_CS_H__
+
+#include <linux/types.h>
+
+#include "intel_gpu_commands.h"
+
+struct i915_request;
+struct intel_engine_cs;
+
+int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
+int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
+u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
+u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+int gen6_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+int hsw_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags);
+
+void gen6_irq_enable(struct intel_engine_cs *engine);
+void gen6_irq_disable(struct intel_engine_cs *engine);
+
+void hsw_irq_enable_vecs(struct intel_engine_cs *engine);
+void hsw_irq_disable_vecs(struct intel_engine_cs *engine);
+
+#endif /* __GEN6_ENGINE_CS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
new file mode 100644
index 000000000..5aaacc53f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "gen6_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_gt_regs.h"
+#include "intel_engine_regs.h"
+#include "intel_gt.h"
+
+/* Write pde (index) from the page directory @pd to the page table @pt */
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
+{
+ dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
+
+ /* Caller needs to make sure the write completes if necessary */
+ iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
+}
+
+void gen7_ppgtt_enable(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 ecochk;
+
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
+}
+
+void gen6_ppgtt_enable(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
+
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+}
+
+/* PPGTT support for Sandybdrige/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
+ unsigned int pde = first_entry / GEN6_PTES;
+ unsigned int pte = first_entry % GEN6_PTES;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+
+ while (num_entries) {
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
+ gen6_pte_t *vaddr;
+
+ num_entries -= count;
+
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
+ ppgtt->scan_for_unused_pt = true;
+
+ /*
+ * Note that the hw doesn't support removing PDE on the fly
+ * (they are cached inside the context with no means to
+ * invalidate the cache), so we can only reset the PTE
+ * entries back to scratch.
+ */
+
+ vaddr = px_vaddr(pt);
+ memset32(vaddr + pte, scratch_pte, count);
+
+ pte = 0;
+ }
+}
+
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
+ unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
+ unsigned int act_pt = first_entry / GEN6_PTES;
+ unsigned int act_pte = first_entry % GEN6_PTES;
+ const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ struct sgt_dma iter = sgt_dma(vma_res);
+ gen6_pte_t *vaddr;
+
+ GEM_BUG_ON(!pd->entry[act_pt]);
+
+ vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
+ do {
+ GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
+ vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
+
+ iter.dma += I915_GTT_PAGE_SIZE;
+ if (iter.dma == iter.max) {
+ iter.sg = __sg_next(iter.sg);
+ if (!iter.sg || sg_dma_len(iter.sg) == 0)
+ break;
+
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + sg_dma_len(iter.sg);
+ }
+
+ if (++act_pte == GEN6_PTES) {
+ vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
+ act_pte = 0;
+ }
+ } while (1);
+
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ start = round_down(start, SZ_64K);
+ end = round_up(end, SZ_64K) - start;
+
+ mutex_lock(&ppgtt->flush);
+
+ gen6_for_each_pde(pt, pd, start, end, pde)
+ gen6_write_pde(ppgtt, pde, pt);
+
+ mb();
+ ioread32(ppgtt->pd_addr + pde - 1);
+ gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+ mb();
+
+ mutex_unlock(&ppgtt->flush);
+}
+
+static void gen6_alloc_va_range(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ bool flush = false;
+ u64 from = start;
+ unsigned int pde;
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
+ const unsigned int count = gen6_pte_count(start, length);
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ pt = stash->pt[0];
+ __i915_gem_object_pin_pages(pt->base);
+
+ fill32_px(pt, vm->scratch[0]->encode);
+
+ spin_lock(&pd->lock);
+ if (!pd->entry[pde]) {
+ stash->pt[0] = pt->stash;
+ atomic_set(&pt->used, 0);
+ pd->entry[pde] = pt;
+ } else {
+ pt = pd->entry[pde];
+ }
+
+ flush = true;
+ }
+
+ atomic_add(count, &pt->used);
+ }
+ spin_unlock(&pd->lock);
+
+ if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
+ gen6_flush_pd(ppgtt, from, start);
+ }
+}
+
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_address_space * const vm = &ppgtt->base.vm;
+ int ret;
+
+ ret = setup_scratch_page(vm);
+ if (ret)
+ return ret;
+
+ vm->scratch[0]->encode =
+ vm->pte_encode(px_dma(vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
+
+ vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(vm->scratch[1])) {
+ ret = PTR_ERR(vm->scratch[1]);
+ goto err_scratch0;
+ }
+
+ ret = map_pt_dma(vm, vm->scratch[1]);
+ if (ret)
+ goto err_scratch1;
+
+ fill32_px(vm->scratch[1], vm->scratch[0]->encode);
+
+ return 0;
+
+err_scratch1:
+ i915_gem_object_put(vm->scratch[1]);
+err_scratch0:
+ i915_gem_object_put(vm->scratch[0]);
+ vm->scratch[0] = NULL;
+ return ret;
+}
+
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ u32 pde;
+
+ gen6_for_all_pdes(pt, pd, pde)
+ if (pt)
+ free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+
+ gen6_ppgtt_free_pd(ppgtt);
+ free_scratch(vm);
+
+ if (ppgtt->base.pd)
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+
+ mutex_destroy(&ppgtt->flush);
+}
+
+static void pd_vma_bind(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct gen6_ppgtt *ppgtt = vma_res->private;
+ u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;
+
+ ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
+
+ gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
+}
+
+static void pd_vma_unbind(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ struct gen6_ppgtt *ppgtt = vma_res->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ if (!ppgtt->scan_for_unused_pt)
+ return;
+
+ /* Free all no longer used page tables */
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (!pt || atomic_read(&pt->used))
+ continue;
+
+ free_pt(&ppgtt->base.vm, pt);
+ pd->entry[pde] = NULL;
+ }
+
+ ppgtt->scan_for_unused_pt = false;
+}
+
+static const struct i915_vma_ops pd_vma_ops = {
+ .bind_vma = pd_vma_bind,
+ .unbind_vma = pd_vma_unbind,
+};
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
+
+ GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));
+
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked, type and kill this redundancy.)
+ */
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
+ return 0;
+
+ /* grab the ppgtt resv to pin the object */
+ err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
+ if (err)
+ return err;
+
+ /*
+ * PPGTT PDEs reside in the GGTT and consists of 512 entries. The
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+ if (!atomic_read(&ppgtt->pin_count)) {
+ err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
+
+ GEM_BUG_ON(ppgtt->vma->fence);
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
+ }
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+
+ return err;
+}
+
+static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
+{
+ obj->mm.pages = ZERO_SIZE_PTR;
+ return 0;
+}
+
+static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+}
+
+static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
+ .name = "pd_dummy_obj",
+ .get_pages = pd_dummy_obj_get_pages,
+ .put_pages = pd_dummy_obj_put_pages,
+};
+
+static struct i915_page_directory *
+gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
+{
+ struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
+ struct i915_page_directory *pd;
+ int err;
+
+ pd = __alloc_pd(I915_PDES);
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
+ &pd_dummy_obj_ops,
+ I915_PDES * SZ_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
+ }
+
+ pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
+ pd->pt.base->shares_resv_from = &ppgtt->base.vm;
+
+ ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
+ if (IS_ERR(ppgtt->vma)) {
+ err = PTR_ERR(ppgtt->vma);
+ ppgtt->vma = NULL;
+ goto err_pd;
+ }
+
+ /* The dummy object we create is special, override ops.. */
+ ppgtt->vma->ops = &pd_vma_ops;
+ ppgtt->vma->private = ppgtt;
+ return pd;
+
+err_pd:
+ free_pd(&ppgtt->base.vm, pd);
+ return ERR_PTR(err);
+}
+
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
+{
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
+}
+
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
+{
+ struct i915_ggtt * const ggtt = gt->ggtt;
+ struct gen6_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&ppgtt->flush);
+
+ ppgtt_init(&ppgtt->base, gt, 0);
+ ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
+ ppgtt->base.vm.top = 1;
+
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+
+ ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
+ ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+ goto err_put;
+
+ ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
+ if (IS_ERR(ppgtt->base.pd)) {
+ err = PTR_ERR(ppgtt->base.pd);
+ goto err_put;
+ }
+
+ return &ppgtt->base;
+
+err_put:
+ i915_vm_put(&ppgtt->base.vm);
+ return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
new file mode 100644
index 000000000..5e5cf2ec3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN6_PPGTT_H__
+#define __GEN6_PPGTT_H__
+
+#include "intel_gtt.h"
+
+struct i915_gem_ww_ctx;
+
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
+
+ struct mutex flush;
+ struct i915_vma *vma;
+ gen6_pte_t __iomem *pd_addr;
+ u32 pp_dir;
+
+ atomic_t pin_count;
+
+ bool scan_for_unused_pt;
+};
+
+static inline u32 gen6_pte_index(u32 addr)
+{
+ return i915_pte_index(addr, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pte_count(u32 addr, u32 length)
+{
+ return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
+}
+
+static inline u32 gen6_pde_index(u32 addr)
+{
+ return i915_pde_index(addr, GEN6_PDE_SHIFT);
+}
+
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
+
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
+{
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
+ return __to_gen6_ppgtt(base);
+}
+
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
+ */
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ({ u32 temp = ALIGN(start + 1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp; length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = i915_pt_entry(pd, iter), true); \
+ ++iter)
+
+int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_enable(struct intel_gt *gt);
+void gen7_ppgtt_enable(struct intel_gt *gt);
+struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/gen6_renderstate.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
new file mode 100644
index 000000000..555e83f3a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen6_null_state_relocs[] = {
+ 0x00000020,
+ 0x00000024,
+ 0x0000002c,
+ 0x000001e0,
+ 0x000001e4,
+ -1,
+};
+
+static const u32 gen6_null_state_batch[] = {
+ 0x69040000,
+ 0x790d0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x61020000,
+ 0x00000000,
+ 0x78050001,
+ 0x00000018,
+ 0x00000000,
+ 0x780d1002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000420,
+ 0x78150003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79100000,
+ 0x00000000,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x780e0002,
+ 0x00000441,
+ 0x00000401,
+ 0x00000401,
+ 0x78021002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000400,
+ 0x78130012,
+ 0x00400810,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140007,
+ 0x00000280,
+ 0x08080000,
+ 0x00000000,
+ 0x00060000,
+ 0x4e080002,
+ 0x00100400,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11330000,
+ 0x02850004,
+ 0x11220000,
+ 0x78011002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000200,
+ 0x78080003,
+ 0x00002000,
+ 0x00000448, /* reloc */
+ 0x00000448, /* reloc */
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000220, /* state start */
+ 0x00000240,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0060005a,
+ 0x204077be,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x206077be,
+ 0x000000c0,
+ 0x008d0080,
+ 0x0060005a,
+ 0x208077be,
+ 0x000000d0,
+ 0x008d0040,
+ 0x0060005a,
+ 0x20a077be,
+ 0x000000d0,
+ 0x008d0080,
+ 0x00000201,
+ 0x20080061,
+ 0x00000000,
+ 0x00000000,
+ 0x00600001,
+ 0x20200022,
+ 0x008d0000,
+ 0x00000000,
+ 0x02800031,
+ 0x21c01cc9,
+ 0x00000020,
+ 0x0a8a0001,
+ 0x00600001,
+ 0x204003be,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00600001,
+ 0x206003be,
+ 0x008d01e0,
+ 0x00000000,
+ 0x00600001,
+ 0x208003be,
+ 0x008d0200,
+ 0x00000000,
+ 0x00600001,
+ 0x20a003be,
+ 0x008d0220,
+ 0x00000000,
+ 0x00600001,
+ 0x20c003be,
+ 0x008d0240,
+ 0x00000000,
+ 0x00600001,
+ 0x20e003be,
+ 0x008d0260,
+ 0x00000000,
+ 0x00600001,
+ 0x210003be,
+ 0x008d0280,
+ 0x00000000,
+ 0x00600001,
+ 0x212003be,
+ 0x008d02a0,
+ 0x00000000,
+ 0x05800031,
+ 0x24001cc8,
+ 0x00000040,
+ 0x90019000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0000007e,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30000000,
+ 0x00000124,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80000031,
+ 0x00000003,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(6);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000..317efb145
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_regs.h"
+
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_threads;
+ u32 state_start;
+ u32 surface_start;
+ u32 surface_height;
+ u32 surface_width;
+ u32 size;
+};
+
+static int num_primitives(const struct batch_vals *bv)
+{
+ /*
+ * We need to saturate the GPU with work in order to dispatch
+ * a shader on every HW thread, and clear the thread-local registers.
+ * In short, we have to dispatch work faster than the shaders can
+ * run in order to fill the EU and occupy each HW thread.
+ */
+ return bv->max_threads;
+}
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1:
+ bv->max_threads = 70;
+ break;
+ case 2:
+ bv->max_threads = 140;
+ break;
+ case 3:
+ bv->max_threads = 280;
+ break;
+ }
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ case 1: /* including vlv */
+ bv->max_threads = 36;
+ break;
+ case 2:
+ bv->max_threads = 128;
+ break;
+ }
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
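+ /*
+ * Layout of the batch object: the command stream at offset 0 (1K plus
+ * 64 bytes per primitive, rounded up to 4K), then 4K of indirect
+ * state, then the scratch surface itself.
+ */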
+ bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+ bv->surface_start = bv->state_start + SZ_4K;
+ bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
+}
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
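+/*
+ * Write an 8-dword SURFACE_STATE describing the scratch area as a B8G8R8A8
+ * 2D render target and return its offset within the state chunk.
+ */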
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start =
+ gen7_fill_surface_state(state, bv->surface_start, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
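+/*
+ * Emit one INTERFACE_DESCRIPTOR_DATA pointing at the clear kernel and its
+ * binding table; the remaining @count - 1 descriptors are zeroed.
+ */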
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* 1 - 63: dummy interface descriptors */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 10);
+
+ *cs++ = STATE_BASE_ADDRESS | (10 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access upper bounds */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 threads = bv->max_threads - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | 1 << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256 bits unit */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - relative to the dynamic state
+ * base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int pkt = 6 + 3;
+ u32 *cs;
+
+ cs = batch_alloc_items(batch, 8, pkt);
+
+ *cs++ = MEDIA_OBJECT | (pkt - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* without indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = y_offset << 16 | x_offset;
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 4);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 10);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ const unsigned int desc_count = 1;
+ const unsigned int urb_size = 1;
+ struct batch_chunk cmds, state;
+ u32 descriptors;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->state_start);
+ batch_init(&state, vma, start, bv->state_start, SZ_4K);
+
+ descriptors = gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+
+ /* Reset inherited context registers */
+ gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000 |
+ ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+ HIZ_RAW_STALL_OPT_DISABLE :
+ 0));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+ gen7_emit_pipeline_invalidate(&cmds);
+ gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_pipeline_invalidate(&cmds);
+
+ gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_state_base_address(&cmds, descriptors);
+ gen7_emit_pipeline_invalidate(&cmds);
+
+ /* Set the clear-residual kernel state */
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+ gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
+
+ /* Execute the kernel on all HW threads */
+ for (i = 0; i < num_primitives(bv); i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ __i915_gem_object_release_map(vma->obj);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000..bb100748e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderstate.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
new file mode 100644
index 000000000..c36e84d30
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+ 0x0000000c,
+ 0x00000010,
+ 0x00000018,
+ 0x000001ec,
+ -1,
+};
+
+static const u32 gen7_null_state_batch[] = {
+ 0x69040000,
+ 0x61010008,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x790d0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78180000,
+ 0x00000001,
+ 0x79160000,
+ 0x00000008,
+ 0x78300000,
+ 0x02010040,
+ 0x78310000,
+ 0x04000000,
+ 0x78320000,
+ 0x04000000,
+ 0x78330000,
+ 0x02000000,
+ 0x78100004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78110005,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78210000,
+ 0x00000000,
+ 0x78130005,
+ 0x00000000,
+ 0x20000000,
+ 0x04000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78140001,
+ 0x20000800,
+ 0x00000000,
+ 0x781e0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78050005,
+ 0xe0040000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x78240000,
+ 0x00000240,
+ 0x78230000,
+ 0x00000260,
+ 0x782f0000,
+ 0x00000280,
+ 0x781f000c,
+ 0x00400810,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78200006,
+ 0x000002c0,
+ 0x08080000,
+ 0x00000000,
+ 0x28000402,
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090005,
+ 0x02000000,
+ 0x22220000,
+ 0x02f60000,
+ 0x11230000,
+ 0x02f60004,
+ 0x11230000,
+ 0x78080003,
+ 0x00006008,
+ 0x00000340, /* reloc */
+ 0xffffffff,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000360,
+ 0x79000002,
+ 0xffffffff,
+ 0x00000000,
+ 0x00000000,
+ 0x7b000005,
+ 0x0000000f,
+ 0x00000003,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000031, /* state start */
+ 0x00000003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xf99a130c,
+ 0x799a130c,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000492,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0080005a,
+ 0x2e2077bd,
+ 0x000000c0,
+ 0x008d0040,
+ 0x0080005a,
+ 0x2e6077bd,
+ 0x000000d0,
+ 0x008d0040,
+ 0x02800031,
+ 0x21801fa9,
+ 0x008d0e20,
+ 0x08840001,
+ 0x00800001,
+ 0x2e2003bd,
+ 0x008d0180,
+ 0x00000000,
+ 0x00800001,
+ 0x2e6003bd,
+ 0x008d01c0,
+ 0x00000000,
+ 0x00800001,
+ 0x2ea003bd,
+ 0x008d0200,
+ 0x00000000,
+ 0x00800001,
+ 0x2ee003bd,
+ 0x008d0240,
+ 0x00000000,
+ 0x05800031,
+ 0x20001fa8,
+ 0x008d0e20,
+ 0x90031000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000380,
+ 0x000003a0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(7);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
new file mode 100644
index 000000000..efc22f9b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_ring.h"
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ bool vf_flush_wa = false, dc_flush_wa = false;
+ u32 *cs, flags = 0;
+ int len;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (mode & EMIT_FLUSH) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (GRAPHICS_VER(rq->engine->i915) == 9)
+ vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
+ dc_flush_wa = true;
+ }
+
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd, *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
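+/*
+ * On gen12+ MI_ARB_CHECK doubles as the pre-parser control: bit 8 is the
+ * write mask for the pre-fetch disable bit in bit 0.
+ */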
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
+{
+ switch (engine->id) {
+ case RCS0:
+ return GEN12_CCS_AUX_INV;
+ case BCS0:
+ return GEN12_BCS0_AUX_INV;
+ case VCS0:
+ return GEN12_VD0_AUX_INV;
+ case VCS2:
+ return GEN12_VD2_AUX_INV;
+ case VECS0:
+ return GEN12_VE0_AUX_INV;
+ case CCS0:
+ return GEN12_CCS0_AUX_INV;
+ default:
+ return INVALID_MMIO_REG;
+ }
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+ i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+ if (IS_PONTEVECCHIO(engine->i915))
+ return false;
+
+ /*
+ * So far, platforms supported by i915 that have flat CCS do not
+ * require AUX invalidation. Also check whether the engine needs it.
+ */
+ return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+ i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+ u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+ if (!gen12_needs_ccs_aux_inv(engine))
+ return cs;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+ *cs++ = AUX_INV;
+
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_REGISTER_POLL |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * On Aux CCS platforms the invalidation of the Aux
+ * table requires quiescing memory traffic beforehand
+ */
+ if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+ u32 flags = 0;
+ u32 *cs;
+
+ /*
+ * The L3 fabric flush needed for AUX CCS invalidation already
+ * happens as part of the pipe-control, so we can skip
+ * PIPE_CONTROL_FLUSH_L3 here. PIPE_CONTROL_FLUSH_L3 also deals
+ * with Protected Memory, which is not needed for AUX CCS
+ * invalidation and leads to unwanted side effects.
+ */
+ if (mode & EMIT_FLUSH)
+ flags |= PIPE_CONTROL_FLUSH_L3;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl,adl-p */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen12_emit_pipe_control(cs,
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(rq, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs, count;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ if (!HAS_3D_PIPELINE(engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ count = 8;
+ if (gen12_needs_ccs_aux_inv(rq->engine))
+ count += 8;
+
+ cs = intel_ring_begin(rq, count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ cs = gen12_emit_aux_table_inv(engine, cs);
+
+ *cs++ = preparser_disable(false);
+ intel_ring_advance(rq, cs);
+ }
+
+ return 0;
+}
+
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+{
+ u32 cmd = 4;
+ u32 *cs;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd += 2;
+
+ if (gen12_needs_ccs_aux_inv(rq->engine))
+ cmd += 8;
+ }
+
+ cs = intel_ring_begin(rq, cmd);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
+ cmd = MI_FLUSH_DW + 1;
+
+ /*
+ * We always require a command barrier so that subsequent
+ * commands, such as breadcrumb interrupts, are strictly ordered
+ * wrt the contents of the write cache being flushed to memory
+ * (and thus being coherent from the CPU).
+ */
+ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+ if (mode & EMIT_INVALIDATE) {
+ cmd |= MI_INVALIDATE_TLB;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cmd |= MI_INVALIDATE_BSD;
+ }
+
+ *cs++ = cmd;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+
+ cs = gen12_emit_aux_table_inv(rq->engine, cs);
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
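+/* GGTT address of the preemption semaphore within the engine's status page */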
+static u32 preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline *tl;
+
+ /* Before the request is executed, the timeline is fixed */
+ tl = rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
+
+ /* See the comment in i915_request_active_seqno(). */
+ return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
+}
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = hwsp_offset(rq);
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ *
+ * i915_request_started() is used during preemption processing
+ * to decide if the request is currently inside the user payload
+ * or spinning on a kernel semaphore (or earlier). For no-preemption
+ * requests, we do allow preemption on the semaphore before the user
+ * payload, but do not allow preemption once the request is started.
+ *
+ * i915_request_started() is similarly used during GPU hangs to
+ * determine if the user's payload was guilty, and if so, the
+ * request is banned. Before the request is started, it is assumed
+ * to be unharmed and an innocent victim of another's hang.
+ */
+ *cs++ = MI_NOOP;
+ *cs++ = MI_ARB_CHECK;
+
+ intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
+ __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
+
+ return 0;
+}
+
+static int __gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags,
+ u32 arb)
+{
+ struct intel_context *ce = rq->context;
+ u32 wa_offset = lrc_indirect_bb(ce);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | arb;
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
+ *cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ /* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
+ *cs++ = 0;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen125_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
+}
+
+int gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
+}
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular on all the gens that do not need the w/a at all!); if
+ * we made sure that arbitration was enabled on every switch into
+ * this context (both ordinary and for preemption) we would be fine.
+ * However, for gen8 there is another w/a that requires us not to
+ * preempt inside GPGPU execution, so we keep arbitration disabled
+ * for gen8 batches. Arbitration will be re-enabled before we close
+ * the request (engine->emit_fini_breadcrumb).
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ if (unlikely(i915_request_has_nopreempt(rq)))
+ return gen8_emit_bb_start_noarb(rq, offset, len, flags);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
+{
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+ rq->wa_tail = intel_ring_offset(rq, cs);
+
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(rq);
+
+ return cs;
+}
+
+static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine) &&
+ !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
+ cs = emit_preempt_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+}
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+}
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ return gen8_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = preempt_address(rq->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/* Wa_14014475959:dg2 */
+#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 ccs_semaphore_offset(struct i915_request *rq)
+{
+ return i915_ggtt_offset(rq->context->state) +
+ (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+}
+
+/* Wa_14014475959:dg2 */
+static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ *cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
+ MI_ATOMIC_MOVE;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /*
+ * When MI_ATOMIC_INLINE_DATA is set, this command must be 11 DWs
+ * (+ 1 NOP to align): 4 DWs above + 8 filler DWs here.
+ */
+ for (i = 0; i < 8; ++i)
+ *cs++ = 0;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(rq->engine) &&
+ !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
+ cs = gen12_emit_preempt_busywait(rq, cs);
+
+ /* Wa_14014475959:dg2 */
+ if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
+ cs = ccs_emit_wa_busywait(rq, cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return gen8_emit_wa_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+{
+ /* XXX Stalling flush before the seqno write; the post-sync op alone does not stall */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
+
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ u32 flags = (PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ /* Wa_1409600907 */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+
+ if (!HAS_3D_PIPELINE(rq->engine->i915))
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (rq->engine->class == COMPUTE_CLASS)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ cs = gen12_emit_ggtt_write_rcs(cs,
+ rq->fence.seqno,
+ hwsp_offset(rq),
+ PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ flags);
+
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
+}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
new file mode 100644
index 000000000..651eb786e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __GEN8_ENGINE_CS_H__
+#define __GEN8_ENGINE_CS_H__
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_gt_regs.h"
+#include "intel_gpu_commands.h"
+
+struct intel_engine_cs;
+struct intel_gt;
+struct i915_request;
+
+int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
+int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
+
+int gen8_emit_init_breadcrumb(struct i915_request *rq);
+
+int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen8_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+int gen125_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+int gen125_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+
+u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
+
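+/*
+ * A PIPE_CONTROL is always emitted as a 6-dword packet; unused dwords are
+ * zeroed so every caller advances by the same fixed amount.
+ */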
+static inline u32 *
+__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ memset(batch, 0, 6 * sizeof(u32));
+
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
+ batch[1] = flags1;
+ batch[2] = offset;
+
+ return batch + 6;
+}
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, 0, flags, offset);
+}
+
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+{
+ return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+}
+
+static inline u32 *
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+ *cs++ = 0; /* We're thrashing one extra dword. */
+
+ return cs;
+}
+
+static inline u32*
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32*
+gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
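+/* A four-dword MI_FLUSH_DW: header, post-sync address (low, high) and data. */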
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
+}
+
+#endif /* __GEN8_ENGINE_CS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
new file mode 100644
index 000000000..2128b7a72
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/log2.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "gen8_ppgtt.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "i915_pvinfo.h"
+#include "i915_vgpu.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
+{
+ u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+
+ if (level != I915_CACHE_NONE)
+ pde |= PPAT_CACHED_PDE;
+ else
+ pde |= PPAT_UNCACHED;
+
+ return pde;
+}
+
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~GEN8_PAGE_RW;
+
+ if (flags & PTE_LM)
+ pte |= GEN12_PPGTT_PTE_LM;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+{
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
+ enum vgt_g2v_type msg;
+ int i;
+
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ mutex_lock(&i915->vgpu.lock);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = create ?
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].lo),
+ lower_32_bits(daddr));
+ intel_uncore_write(uncore,
+ vgtif_reg(pdp[i].hi),
+ upper_32_bits(daddr));
+ }
+
+ msg = create ?
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
+ }
+
+ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
+
+ mutex_unlock(&i915->vgpu.lock);
+}
+
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
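+/*
+ * Example: with a 4-level tree (vm->top == 3) a 48-bit address decomposes as
+ * bits [47:39] -> lvl 3 index, [38:30] -> lvl 2, [29:21] -> lvl 1 and
+ * [20:12] -> lvl 0 (the PTE index), each level holding GEN8_PDES entries.
+ */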
+
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
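+/*
+ * Count the level-@lvl entries touched by [start, end), clamped to the
+ * directory containing @start; *@idx is set to the first entry's index.
+ */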
+static unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
+{
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
+}
+
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
+{
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
+
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
+
+static unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
+
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
+
+static struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
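+/* Depth-first teardown of a page-directory subtree, freeing each level. */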
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
+
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
+ }
+
+ free_px(vm, &pd->pt, lvl);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
+
+ if (ppgtt->pd)
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ gen8_pd_top_count(vm), vm->top);
+
+ free_scratch(vm);
+}
+
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
+{
+ const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
+ unsigned int idx, len;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
+ continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ unsigned int pte = gen8_pd_index(start, 0);
+ unsigned int num_ptes;
+ u64 *vaddr;
+
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+ num_ptes = count;
+ if (pt->is_compact) {
+ GEM_BUG_ON(num_ptes % 16);
+ GEM_BUG_ON(pte % 16);
+ num_ptes /= 16;
+ pte /= 16;
+ }
+
+ vaddr = px_vaddr(pt);
+ memset64(vaddr + pte,
+ vm->scratch[0]->encode,
+ num_ptes);
+
+ atomic_sub(count, &pt->used);
+ start += count;
+ }
+
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt, lvl);
+ } while (idx++, --len);
+
+ return start;
+}
+
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
+}
+
+static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
+{
+ unsigned int idx, len;
+
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = stash->pt[!!lvl];
+ __i915_gem_object_pin_pages(pt->base);
+
+ fill_px(pt, vm->scratch[lvl]->encode);
+
+ spin_lock(&pd->lock);
+ if (likely(!pd->entry[idx])) {
+ stash->pt[!!lvl] = pt->stash;
+ atomic_set(&pt->used, 0);
+ set_pd_entry(pd, idx, pt);
+ } else {
+ pt = pd->entry[idx];
+ }
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ __gen8_ppgtt_alloc(vm, stash,
+ as_pd(pt), start, end, lvl);
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+}
+
+static void gen8_ppgtt_alloc(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length)
+{
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+}
+
+static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ u64 *start, u64 end, int lvl,
+ void (*fn)(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data),
+ void *data)
+{
+ unsigned int idx, len;
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+
+ spin_lock(&pd->lock);
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ if (lvl) {
+ __gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
+ fn, data);
+ } else {
+ fn(vm, pt, data);
+ *start += gen8_pt_count(*start, end);
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+}
+
+static void gen8_ppgtt_foreach(struct i915_address_space *vm,
+ u64 start, u64 length,
+ void (*fn)(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data),
+ void *data)
+{
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+
+ __gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top,
+ fn, data);
+}
+
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_page_directory *pd;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ gen8_pte_t *vaddr;
+
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ do {
+ GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+
+ iter->dma += I915_GTT_PAGE_SIZE;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg || sg_dma_len(iter->sg) == 0) {
+ idx = 0;
+ break;
+ }
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + sg_dma_len(iter->sg);
+ }
+
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
+ /* Limited by sg length for 3lvl */
+ if (gen8_pd_index(idx, 2) == 0)
+ break;
+
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
+ }
+
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ }
+ } while (1);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+ return idx;
+}
+
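+/*
+ * On XeHP SDV, local-memory bindings use compact 64K PTEs: sixteen 4K slots
+ * collapse into a single 64K entry, hence the index and limit below being
+ * divided by 16 and the page table being flagged as compact.
+ */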
+static void
+xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = vm->pte_encode(0, cache_level, flags);
+ unsigned int rem = sg_dma_len(iter->sg);
+ u64 start = vma_res->start;
+
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
+ gen8_pte_t encode = pte_encode;
+ unsigned int page_size;
+ gen8_pte_t *vaddr;
+ u16 index, max;
+
+ max = I915_PDES;
+
+ if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
+ encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ vaddr = px_vaddr(pd);
+ } else {
+ if (encode & GEN12_PPGTT_PTE_LM) {
+ GEM_BUG_ON(__gen8_pte_index(start, 0) % 16);
+ GEM_BUG_ON(rem < I915_GTT_PAGE_SIZE_64K);
+ GEM_BUG_ON(!IS_ALIGNED(iter->dma,
+ I915_GTT_PAGE_SIZE_64K));
+
+ index = __gen8_pte_index(start, 0) / 16;
+ page_size = I915_GTT_PAGE_SIZE_64K;
+
+ max /= 16;
+
+ vaddr = px_vaddr(pd);
+ vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;
+
+ pt->is_compact = true;
+ } else {
+ GEM_BUG_ON(pt->is_compact);
+ index = __gen8_pte_index(start, 0);
+ page_size = I915_GTT_PAGE_SIZE;
+ }
+
+ vaddr = px_vaddr(pt);
+ }
+
+ do {
+ GEM_BUG_ON(rem < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = sg_dma_len(iter->sg);
+ if (!rem)
+ break;
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < max);
+
+ vma_res->page_sizes_gtt |= page_size;
+ } while (iter->sg && sg_dma_len(iter->sg));
+}
+
+static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ unsigned int rem = sg_dma_len(iter->sg);
+ u64 start = vma_res->start;
+
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
+ gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
+ gen8_pte_t *vaddr;
+ u16 index;
+
+ if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
+ encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ vaddr = px_vaddr(pd);
+ } else {
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
+
+ index = __gen8_pte_index(start, 0);
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
+
+ vaddr = px_vaddr(pt);
+ }
+
+ do {
+ GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = sg_dma_len(iter->sg);
+ if (!rem)
+ break;
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K != -1 && index < I915_PDES &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < I915_PDES);
+
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+ /*
+ * Is it safe to mark the 2M block as 64K? Either we have filled
+ * the whole page-table with 64K entries, or we have filled part
+ * of it and reached the end of the sg table with enough padding.
+ */
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
+ (i915_vm_has_scratch_64K(vm) &&
+ !iter->sg && IS_ALIGNED(vma_res->start +
+ vma_res->node_size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = px_vaddr(pd);
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * We write all 4K page entries, even when using 64K
+ * pages. In order to verify that the HW isn't cheating
+ * by using the 4K PTE instead of the 64K PTE, we want
+ * to remove all the surplus entries. If the HW skipped
+ * the 64K PTE, it will read/write into the scratch page
+ * instead - which we detect as missing results during
+ * selftests.
+ */
+ if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
+ u16 i;
+
+ encode = vm->scratch[0]->encode;
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
+
+ for (i = 1; i < index; i += 16)
+ memset64(vaddr + i, encode, 15);
+
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ }
+ }
+
+ vma_res->page_sizes_gtt |= page_size;
+ } while (iter->sg && sg_dma_len(iter->sg));
+}
+
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+ struct sgt_dma iter = sgt_dma(vma_res);
+
+ if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ if (HAS_64K_PAGES(vm->i915))
+ xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
+ else
+ gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
+ } else {
+ u64 idx = vma_res->start >> GEN8_PTE_SHIFT;
+
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
+
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+ }
+}
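+
+/*
+ * Illustrative sketch of the 48b address decomposition assumed by the
+ * insert paths above (index widths follow the standard gen8 4-level
+ * layout; the real helpers are __gen8_pte_index()/gen8_pd_index()):
+ *
+ *   offset = addr & 0xfff;          bits 11:0,  page offset
+ *   pte    = (addr >> 12) & 0x1ff;  bits 20:12, level 0
+ *   pde    = (addr >> 21) & 0x1ff;  bits 29:21, level 1
+ *   pdpe   = (addr >> 30) & 0x1ff;  bits 38:30, level 2
+ *   pml4e  = (addr >> 39) & 0x1ff;  bits 47:39, level 3
+ *
+ * A 2M mapping consumes one whole level-1 entry (GEN8_PDE_PS_2M), while
+ * 64K mode keeps 4K-granular PTEs and tags the PDE with GEN8_PDE_IPS_64K.
+ */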
+
+static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ u64 idx = offset >> GEN8_PTE_SHIFT;
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+ struct i915_page_directory *pd =
+ i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
+ gen8_pte_t *vaddr;
+
+ GEM_BUG_ON(pt->is_compact);
+
+ vaddr = px_vaddr(pt);
+ vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
+ drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
+}
+
+static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ u64 idx = offset >> GEN8_PTE_SHIFT;
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+ struct i915_page_directory *pd =
+ i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
+ gen8_pte_t *vaddr;
+
+ GEM_BUG_ON(!IS_ALIGNED(addr, SZ_64K));
+ GEM_BUG_ON(!IS_ALIGNED(offset, SZ_64K));
+
+ if (!pt->is_compact) {
+ vaddr = px_vaddr(pd);
+ vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
+ pt->is_compact = true;
+ }
+
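+ /*
+ * With the PDE in 64K (compact) mode the page table still spans 2M,
+ * but each entry now covers 64K, so the 4K-granular index is scaled
+ * down by 16 (512 x 4K slots -> 32 x 64K slots).
+ */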
+ vaddr = px_vaddr(pt);
+ vaddr[gen8_pd_index(idx, 0) / 16] = gen8_pte_encode(addr, level, flags);
+}
+
+static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ if (flags & PTE_LM)
+ return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
+ level, flags);
+
+ return gen8_ppgtt_insert_entry(vm, addr, offset, level, flags);
+}
+
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+ u32 pte_flags;
+ int ret;
+ int i;
+
+ /*
+ * If everybody agrees not to write into the scratch page,
+ * we can reuse it for all VMs, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+ struct i915_address_space *clone = vm->gt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_order = clone->scratch_order;
+ for (i = 0; i <= vm->top; i++)
+ vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);
+
+ return 0;
+ }
+
+ ret = setup_scratch_page(vm);
+ if (ret)
+ return ret;
+
+ pte_flags = vm->has_read_only;
+ if (i915_gem_object_is_lmem(vm->scratch[0]))
+ pte_flags |= PTE_LM;
+
+ vm->scratch[0]->encode =
+ gen8_pte_encode(px_dma(vm->scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
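+ /*
+ * Build the scratch chain: each level above the scratch page is a
+ * page directory filled with entries pointing at the scratch object
+ * one level below, so any unpopulated VA range ultimately resolves
+ * to the scratch page.
+ */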
+ for (i = 1; i <= vm->top; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto free_scratch;
+ }
+
+ ret = map_pt_dma(vm, obj);
+ if (ret) {
+ i915_gem_object_put(obj);
+ goto free_scratch;
+ }
+
+ fill_px(obj, vm->scratch[i - 1]->encode);
+ obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);
+
+ vm->scratch[i] = obj;
+ }
+
+ return 0;
+
+free_scratch:
+ while (i--)
+ i915_gem_object_put(vm->scratch[i]);
+ vm->scratch[0] = NULL;
+ return ret;
+}
+
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+{
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
+
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
+
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
+ int err;
+
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
+
+ err = map_pt_dma(vm, pde->pt.base);
+ if (err) {
+ free_pd(vm, pde);
+ return err;
+ }
+
+ fill_px(pde, vm->scratch[1]->encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
+ }
+ wmb();
+
+ return 0;
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+ const unsigned int count = gen8_pd_top_count(vm);
+ struct i915_page_directory *pd;
+ int err;
+
+ GEM_BUG_ON(count > I915_PDES);
+
+ pd = __alloc_pd(count);
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
+ }
+
+ err = map_pt_dma(vm, pd->pt.base);
+ if (err)
+ goto err_pd;
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
+
+err_pd:
+ free_pd(vm, pd);
+ return ERR_PTR(err);
+}
+
+/*
+ * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
+ * registers, with a net effect resembling a 2-level page table in normal x86
+ * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
+ * legacy 32b address space.
+ */
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
+{
+ struct i915_page_directory *pd;
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ERR_PTR(-ENOMEM);
+
+ ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+ ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
+
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
+ */
+ ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
+
+ if (HAS_LMEM(gt->i915)) {
+ ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
+
+ /*
+ * On some platforms the hw has dropped support for 4K GTT pages
+ * when dealing with LMEM, and due to the design of 64K GTT
+ * pages in the hw, we can only mark the *entire* page-table as
+ * operating in 64K GTT mode, since the enable bit is still on
+ * the pde, and not the pte. And since we still need to allow
+ * 4K GTT pages for SMEM objects, we can't have a "normal" 4K
+ * page-table with scratch pointing to LMEM, since that's
+ * undefined from the hw pov. The simplest solution is to just
+ * move the 64K scratch page to SMEM on such platforms and call
+ * it a day, since that should work for all configurations.
+ */
+ if (HAS_64K_PAGES(gt->i915))
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ else
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_lmem;
+ } else {
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ }
+
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+ if (HAS_64K_PAGES(gt->i915))
+ ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
+ else
+ ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.foreach = gen8_ppgtt_foreach;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
+ pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_put;
+ }
+ ppgtt->pd = pd;
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_put;
+ }
+
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+ return ppgtt;
+
+err_put:
+ i915_vm_put(&ppgtt->vm);
+ return ERR_PTR(err);
+}
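+
+/*
+ * A simplified usage sketch (in practice callers reach this constructor
+ * through i915_ppgtt_create(), which selects it for graphics version >= 8):
+ *
+ *	struct i915_ppgtt *ppgtt = gen8_ppgtt_create(gt, 0);
+ *
+ *	if (IS_ERR(ppgtt))
+ *		return PTR_ERR(ppgtt);
+ *	...
+ *	i915_vm_put(&ppgtt->vm);
+ */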
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
new file mode 100644
index 000000000..f541d1926
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __GEN8_PPGTT_H__
+#define __GEN8_PPGTT_H__
+
+#include <linux/kernel.h>
+
+struct i915_address_space;
+struct intel_gt;
+enum i915_cache_level;
+
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
+
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/gen8_renderstate.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
new file mode 100644
index 000000000..ef9d7b0dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen8_null_state_relocs[] = {
+ 0x00000798,
+ 0x000007a4,
+ 0x000007ac,
+ 0x000007bc,
+ -1,
+};
+
+static const u32 gen8_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x69040000,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6101000e,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00001001,
+ 0x00001001,
+ 0x00000001,
+ 0x00001001,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
+ 0x780d0000,
+ 0x00000000,
+ 0x79060000,
+ 0x00000000,
+ 0x7907001f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7902000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x790c000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780a0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78080083,
+ 0x00004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x04004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x08004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x14004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x18004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x20004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x24004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x28004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x2c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x34004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x38004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x40004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x44004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x48004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x50004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x58004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x5c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x60004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x64004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x70004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
+ 0x02000000,
+ 0x22220000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x680b0001,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x780e0000,
+ 0x00000dc1,
+ 0x78240000,
+ 0x00000e01,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x782f0000,
+ 0x00000000,
+ 0x780f0000,
+ 0x00000000,
+ 0x78230000,
+ 0x00000e60,
+ 0x78210000,
+ 0x00000e80,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(8);
diff --git a/drivers/gpu/drm/i915/gt/gen9_renderstate.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
new file mode 100644
index 000000000..428b724de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Generated by: intel-gpu-tools-1.19-177-g68e2eab2
+ */
+
+#include "intel_renderstate.h"
+
+static const u32 gen9_null_state_relocs[] = {
+ 0x000007a8,
+ 0x000007b4,
+ 0x000007bc,
+ 0x000007cc,
+ -1,
+};
+
+static const u32 gen9_null_state_batch[] = {
+ 0x7a000004,
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x69040300,
+ 0x78140000,
+ 0x04000000,
+ 0x7820000a,
+ 0x00000000,
+ 0x00000000,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78130002,
+ 0x00000000,
+ 0x00000000,
+ 0x02001808,
+ 0x781f0004,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78510009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78100007,
+ 0x00000000,
+ 0x00000000,
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781b0007,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000800,
+ 0x00000000,
+ 0x78110008,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781e0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781d0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78120002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78500003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781c0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780c0000,
+ 0x00000000,
+ 0x78520003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78300000,
+ 0x08010040,
+ 0x78310000,
+ 0x1e000000,
+ 0x78320000,
+ 0x1e000000,
+ 0x78330000,
+ 0x1e000000,
+ 0x79190002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791a0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x791b0002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79120000,
+ 0x00000000,
+ 0x79130000,
+ 0x00000000,
+ 0x79140000,
+ 0x00000000,
+ 0x79150000,
+ 0x00000000,
+ 0x79160000,
+ 0x00000000,
+ 0x78150009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78190009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x781a0009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78160009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78170009,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78490001,
+ 0x00000000,
+ 0x00000000,
+ 0x784a0000,
+ 0x00000000,
+ 0x784b0000,
+ 0x00000004,
+ 0x79170101,
+ 0x00000000,
+ 0x00000080,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79180006,
+ 0x60000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61010011,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001, /* reloc */
+ 0x00000000,
+ 0x00001001,
+ 0x00001001,
+ 0x00000001,
+ 0x00001001,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x61020001,
+ 0x00000000,
+ 0x00000000,
+ 0x79000002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78050006,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x40000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0x80000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79040002,
+ 0xc0000000,
+ 0x00000000,
+ 0x00000000,
+ 0x79080001,
+ 0x00000000,
+ 0x00000000,
+ 0x790a0001,
+ 0x00000000,
+ 0x00000000,
+ 0x78060003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78070003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78040001,
+ 0x00000000,
+ 0x00000000,
+ 0x79110000,
+ 0x00000000,
+ 0x780d0000,
+ 0x00000000,
+ 0x79060000,
+ 0x00000000,
+ 0x7907001f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7902000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x790c000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x780a0003,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78080083,
+ 0x00004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x04004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x08004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x0c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x10004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x14004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x18004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x1c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x20004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x24004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x28004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x2c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x30004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x34004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x38004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x3c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x40004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x44004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x48004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x50004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x54004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x58004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x5c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x60004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x64004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x68004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x6c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x70004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x74004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x7c004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x80004000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78090043,
+ 0x02000000,
+ 0x22220000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x78550003,
+ 0x0000000f,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x680b0001,
+ 0x780e0000,
+ 0x00000e01,
+ 0x78240000,
+ 0x00000e41,
+ 0x784f0000,
+ 0x80000100,
+ 0x784d0000,
+ 0x40000000,
+ 0x782b0000,
+ 0x00000000,
+ 0x782c0000,
+ 0x00000000,
+ 0x782d0000,
+ 0x00000000,
+ 0x782e0000,
+ 0x00000000,
+ 0x782f0000,
+ 0x00000000,
+ 0x780f0000,
+ 0x00000000,
+ 0x78230000,
+ 0x00000ea0,
+ 0x78210000,
+ 0x00000ec0,
+ 0x78260000,
+ 0x00000000,
+ 0x78270000,
+ 0x00000000,
+ 0x78280000,
+ 0x00000000,
+ 0x78290000,
+ 0x00000000,
+ 0x782a0000,
+ 0x00000000,
+ 0x7b000005,
+ 0x00000004,
+ 0x00000001,
+ 0x00000000,
+ 0x00000001,
+ 0x00000000,
+ 0x00000000,
+ 0x05000000, /* cmds end */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state start */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* state end */
+};
+
+RO_RENDERSTATE(9);
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000..b47f9d4a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
new file mode 100644
index 000000000..ecc990ec1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2015-2021 Intel Corporation
+ */
+
+#include <linux/kthread.h>
+#include <linux/string_helpers.h>
+#include <trace/events/dma_fence.h>
+#include <uapi/linux/sched/types.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+
+static bool irq_enable(struct intel_breadcrumbs *b)
+{
+ return intel_engine_irq_enable(b->irq_engine);
+}
+
+static void irq_disable(struct intel_breadcrumbs *b)
+{
+ intel_engine_irq_disable(b->irq_engine);
+}
+
+static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+{
+ /*
+ * Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference.
+ */
+ if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
+ return;
+
+ /*
+ * The breadcrumb irq will be disarmed on the interrupt after the
+ * waiters are signaled. This gives us a single interrupt window in
+ * which we can add a new waiter and avoid the cost of re-enabling
+ * the irq.
+ */
+ WRITE_ONCE(b->irq_armed, true);
+
+ /* Requests may have completed before we could enable the interrupt. */
+ if (!b->irq_enabled++ && b->irq_enable(b))
+ irq_work_queue(&b->irq_work);
+}
+
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+{
+ if (!b->irq_engine)
+ return;
+
+ spin_lock(&b->irq_lock);
+ if (!b->irq_armed)
+ __intel_breadcrumbs_arm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+ GEM_BUG_ON(!b->irq_enabled);
+ if (!--b->irq_enabled)
+ b->irq_disable(b);
+
+ WRITE_ONCE(b->irq_armed, false);
+ intel_gt_pm_put_async(b->irq_engine->gt);
+}
+
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+ spin_lock(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
+static void add_signaling_context(struct intel_breadcrumbs *b,
+ struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->signal_lock);
+
+ spin_lock(&b->signalers_lock);
+ list_add_rcu(&ce->signal_link, &b->signalers);
+ spin_unlock(&b->signalers_lock);
+}
+
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
+ struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->signal_lock);
+
+ if (!list_empty(&ce->signals))
+ return false;
+
+ spin_lock(&b->signalers_lock);
+ list_del_rcu(&ce->signal_link);
+ spin_unlock(&b->signalers_lock);
+
+ return true;
+}
+
+__maybe_unused static bool
+check_signal_order(struct intel_context *ce, struct i915_request *rq)
+{
+ if (rq->context != ce)
+ return false;
+
+ if (!list_is_last(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(rq->fence.seqno,
+ list_next_entry(rq, signal_link)->fence.seqno))
+ return false;
+
+ if (!list_is_first(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
+ rq->fence.seqno))
+ return false;
+
+ return true;
+}
+
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+ return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence,
+ const struct list_head *list)
+{
+ struct dma_fence_cb *cur, *tmp;
+
+ lockdep_assert_held(fence->lock);
+
+ list_for_each_entry_safe(cur, tmp, list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+}
+
+static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
+{
+ if (b->irq_engine)
+ intel_engine_add_retire(b->irq_engine, tl);
+}
+
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+ node->next = head;
+ return node;
+}
+
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
+ const ktime_t timestamp = ktime_get();
+ struct llist_node *signal, *sn;
+ struct intel_context *ce;
+
+ signal = NULL;
+ if (unlikely(!llist_empty(&b->signaled_requests)))
+ signal = llist_del_all(&b->signaled_requests);
+
+ /*
+ * Keep the irq armed until the interrupt after all listeners are gone.
+ *
+ * Enabling/disabling the interrupt is rather costly, roughly a couple
+ * of hundred microseconds. If we are proactive and enable/disable
+ * the interrupt around every request that wants a breadcrumb, we
+ * quickly drown in the extra orders of magnitude of latency imposed
+ * on request submission.
+ *
+ * So we try to be lazy, and keep the interrupts enabled until no
+ * more listeners appear within a breadcrumb interrupt interval (that
+ * is until a request completes that no one cares about). The
+ * observation is that listeners come in batches, and will often
+ * listen to a bunch of requests in succession. Though note on icl+,
+ * interrupts are always enabled due to concerns with rc6 being
+ * dysfunctional with per-engine interrupt masking.
+ *
+ * We also try to avoid raising too many interrupts, as they may
+ * be generated by userspace batches and it is unfortunately rather
+ * too easy to drown the CPU under a flood of GPU interrupts. Thus
+ * whenever no one appears to be listening, we turn off the interrupts.
+ * Fewer interrupts should conserve power -- at the very least, fewer
+ * interrupts draw less ire from other users of the system and tools
+ * like powertop.
+ */
+ if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+ intel_breadcrumbs_disarm_irq(b);
+
+ rcu_read_lock();
+ atomic_inc(&b->signaler_active);
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ struct i915_request *rq;
+
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+ bool release;
+
+ if (!__i915_request_is_complete(rq))
+ break;
+
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ break;
+
+ /*
+ * Queue for execution after dropping the signaling
+ * spinlock as the callback chain may end up adding
+ * more signalers to the same context or engine.
+ */
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(b, ce);
+ spin_unlock(&ce->signal_lock);
+ if (release) {
+ if (intel_timeline_is_last(ce->timeline, rq))
+ add_retire(b, ce->timeline);
+ intel_context_put(ce);
+ }
+
+ if (__dma_fence_signal(&rq->fence))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+ else
+ i915_request_put(rq);
+ }
+ }
+ atomic_dec(&b->signaler_active);
+ rcu_read_unlock();
+
+ llist_for_each_safe(signal, sn, signal) {
+ struct i915_request *rq =
+ llist_entry(signal, typeof(*rq), signal_node);
+ struct list_head cb_list;
+
+ if (rq->engine->sched_engine->retire_inflight_request_prio)
+ rq->engine->sched_engine->retire_inflight_request_prio(rq);
+
+ spin_lock(&rq->lock);
+ list_replace(&rq->fence.cb_list, &cb_list);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ __dma_fence_signal__notify(&rq->fence, &cb_list);
+ spin_unlock(&rq->lock);
+
+ i915_request_put(rq);
+ }
+
+ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ intel_breadcrumbs_arm_irq(b);
+}
+
+struct intel_breadcrumbs *
+intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
+{
+ struct intel_breadcrumbs *b;
+
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ kref_init(&b->ref);
+
+ spin_lock_init(&b->signalers_lock);
+ INIT_LIST_HEAD(&b->signalers);
+ init_llist_head(&b->signaled_requests);
+
+ spin_lock_init(&b->irq_lock);
+ init_irq_work(&b->irq_work, signal_irq_work);
+
+ b->irq_engine = irq_engine;
+ b->irq_enable = irq_enable;
+ b->irq_disable = irq_disable;
+
+ return b;
+}
+
+void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
+{
+ unsigned long flags;
+
+ if (!b->irq_engine)
+ return;
+
+ spin_lock_irqsave(&b->irq_lock, flags);
+
+ if (b->irq_enabled)
+ b->irq_enable(b);
+ else
+ b->irq_disable(b);
+
+ spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (!READ_ONCE(b->irq_armed))
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
+ irq_work_sync(&b->irq_work);
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
+ local_irq_disable();
+ signal_irq_work(&b->irq_work);
+ local_irq_enable();
+ cond_resched();
+ }
+}
+
+void intel_breadcrumbs_free(struct kref *kref)
+{
+ struct intel_breadcrumbs *b = container_of(kref, typeof(*b), ref);
+
+ irq_work_sync(&b->irq_work);
+ GEM_BUG_ON(!list_empty(&b->signalers));
+ GEM_BUG_ON(b->irq_armed);
+
+ kfree(b);
+}
+
+static void irq_signal_request(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
+{
+ if (!__dma_fence_signal(&rq->fence))
+ return;
+
+ i915_request_get(rq);
+ if (llist_add(&rq->signal_node, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+}
+
+static void insert_breadcrumb(struct i915_request *rq)
+{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
+ struct intel_context *ce = rq->context;
+ struct list_head *pos;
+
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ return;
+
+ /*
+ * If the request is already completed, we can transfer it
+ * straight onto a signaled list, and queue the irq worker for
+ * its signal completion.
+ */
+ if (__i915_request_is_complete(rq)) {
+ irq_signal_request(rq, b);
+ return;
+ }
+
+ if (list_empty(&ce->signals)) {
+ intel_context_get(ce);
+ add_signaling_context(b, ce);
+ pos = &ce->signals;
+ } else {
+ /*
+ * We keep the seqno in retirement order, so we can break
+ * inside intel_engine_signal_breadcrumbs as soon as we've
+ * passed the last completed request (or seen a request that
+ * hasn't even started). We could walk the timeline->requests,
+ * but keeping a separate signalers_list has the advantage of
+ * hopefully being much smaller than the full list and so
+ * provides faster iteration and detection when there are no
+ * more interrupts required for this context.
+ *
+ * We typically expect to add new signalers in order, so we
+ * start looking for our insertion point from the tail of
+ * the list.
+ */
+ list_for_each_prev(pos, &ce->signals) {
+ struct i915_request *it =
+ list_entry(pos, typeof(*it), signal_link);
+
+ if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
+ break;
+ }
+ }
+
+ i915_request_get(rq);
+ list_add_rcu(&rq->signal_link, pos);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
+ set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ /*
+ * Defer enabling the interrupt to after HW submission and recheck
+ * the request as it may have completed and raised the interrupt as
+ * we were attaching it to the lists.
+ */
+ if (!b->irq_armed || __i915_request_is_complete(rq))
+ irq_work_queue(&b->irq_work);
+}
+
+bool i915_request_enable_breadcrumb(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+
+ /* Serialises with i915_request_retire() using rq->lock */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return true;
+
+ /*
+ * Peek at i915_request_submit()/i915_request_unsubmit() status.
+ *
+ * If the request is not yet active (and not signaled), we will
+ * attach the breadcrumb later.
+ */
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ return true;
+
+ spin_lock(&ce->signal_lock);
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ insert_breadcrumb(rq);
+ spin_unlock(&ce->signal_lock);
+
+ return true;
+}
+
+void i915_request_cancel_breadcrumb(struct i915_request *rq)
+{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
+ struct intel_context *ce = rq->context;
+ bool release;
+
+ spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ spin_unlock(&ce->signal_lock);
+ return;
+ }
+
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(b, ce);
+ spin_unlock(&ce->signal_lock);
+ if (release)
+ intel_context_put(ce);
+
+ if (__i915_request_is_complete(rq))
+ irq_signal_request(rq, b);
+
+ i915_request_put(rq);
+}
+
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b)
+{
+ struct i915_request *rq, *rn;
+ bool release = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->signal_lock, flags);
+
+ if (list_empty(&ce->signals))
+ goto unlock;
+
+ list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
+ GEM_BUG_ON(!__i915_request_is_complete(rq));
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ continue;
+
+ list_del_rcu(&rq->signal_link);
+ irq_signal_request(rq, b);
+ i915_request_put(rq);
+ }
+ release = remove_signaling_context(b, ce);
+
+unlock:
+ spin_unlock_irqrestore(&ce->signal_lock, flags);
+ if (release)
+ intel_context_put(ce);
+
+ while (atomic_read(&b->signaler_active))
+ cpu_relax();
+}
+
+static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ drm_printf(p, "Signals:\n");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link)
+ drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ }
+ rcu_read_unlock();
+}
+
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p)
+{
+ struct intel_breadcrumbs *b;
+
+ b = engine->breadcrumbs;
+ if (!b)
+ return;
+
+ drm_printf(p, "IRQ: %s\n", str_enabled_disabled(b->irq_armed));
+ if (!list_empty(&b->signalers))
+ print_signals(b, p);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
new file mode 100644
index 000000000..be0d4f379
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BREADCRUMBS__
+#define __INTEL_BREADCRUMBS__
+
+#include <linux/atomic.h>
+#include <linux/irq_work.h>
+
+#include "intel_breadcrumbs_types.h"
+
+struct drm_printer;
+struct i915_request;
+struct intel_breadcrumbs;
+
+struct intel_breadcrumbs *
+intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
+void intel_breadcrumbs_free(struct kref *kref);
+
+void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
+void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
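+/*
+ * unpark/park form an activity refcount around the engine: the final
+ * park flushes any outstanding signalers and disarms the user interrupt
+ * via __intel_breadcrumbs_park().
+ */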
+static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b)
+{
+ atomic_inc(&b->active);
+}
+
+static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+{
+ if (atomic_dec_and_test(&b->active))
+ __intel_breadcrumbs_park(b);
+}
+
+static inline void
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+{
+ irq_work_queue(&engine->breadcrumbs->irq_work);
+}
+
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p);
+
+bool i915_request_enable_breadcrumb(struct i915_request *request);
+void i915_request_cancel_breadcrumb(struct i915_request *request);
+
+void intel_context_remove_breadcrumbs(struct intel_context *ce,
+ struct intel_breadcrumbs *b);
+
+static inline struct intel_breadcrumbs *
+intel_breadcrumbs_get(struct intel_breadcrumbs *b)
+{
+ kref_get(&b->ref);
+ return b;
+}
+
+static inline void intel_breadcrumbs_put(struct intel_breadcrumbs *b)
+{
+ kref_put(&b->ref, intel_breadcrumbs_free);
+}
+
+#endif /* __INTEL_BREADCRUMBS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
new file mode 100644
index 000000000..72dfd3748
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BREADCRUMBS_TYPES__
+#define __INTEL_BREADCRUMBS_TYPES__
+
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+/*
+ * Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+struct intel_breadcrumbs {
+ struct kref ref;
+ atomic_t active;
+
+ spinlock_t signalers_lock; /* protects the list of signalers */
+ struct list_head signalers;
+ struct llist_head signaled_requests;
+ atomic_t signaler_active;
+
+ spinlock_t irq_lock; /* protects the interrupt from hardirq context */
+ struct irq_work irq_work; /* for use from inside irq_lock */
+ unsigned int irq_enabled;
+ bool irq_armed;
+
+ /* Not all breadcrumbs are attached to physical HW */
+ intel_engine_mask_t engine_mask;
+ struct intel_engine_cs *irq_engine;
+ bool (*irq_enable)(struct intel_breadcrumbs *b);
+ void (*irq_disable)(struct intel_breadcrumbs *b);
+};
+
+#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
new file mode 100644
index 000000000..2aa63ec52
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+#include "intel_ring.h"
+
+static struct kmem_cache *slab_ce;
+
+static struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
+}
+
+static void rcu_context_free(struct rcu_head *rcu)
+{
+ struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
+ trace_intel_context_free(ce);
+ kmem_cache_free(slab_ce, ce);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ call_rcu(&ce->rcu, rcu_context_free);
+}
+
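+/*
+ * intel_context_create() returns a new context holding a single creation
+ * reference. A minimal usage sketch (request error handling elided):
+ *
+ *   ce = intel_context_create(engine);
+ *   if (IS_ERR(ce))
+ *           return PTR_ERR(ce);
+ *
+ *   rq = intel_context_create_request(ce);
+ *   ...
+ *   intel_context_put(ce);
+ *
+ * intel_context_create_request() pins the context, builds a request and
+ * unpins again; intel_context_put() drops the creation reference.
+ */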
+struct intel_context *
+intel_context_create(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, engine);
+ trace_intel_context_create(ce);
+ return ce;
+}
+
+int intel_context_alloc_state(struct intel_context *ce)
+{
+ int err = 0;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return -EINTR;
+
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto unlock;
+
+ set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
+unlock:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ __i915_active_acquire(&ce->active);
+
+ if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
+ intel_context_is_parallel(ce))
+ return 0;
+
+ /* Preallocate tracking nodes */
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err)
+ i915_active_release(&ce->active);
+
+ return err;
+}
+
+static void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
+
+static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
+{
+ unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
+ int err;
+
+ err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
+ if (err)
+ return err;
+
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ i915_vma_make_unshrinkable(vma);
+ vma->obj->mm.dirty = true;
+
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static void __context_unpin_state(struct i915_vma *vma)
+{
+ i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
+ __i915_vma_unpin(vma);
+}
+
+static int __ring_active(struct intel_ring *ring,
+ struct i915_gem_ww_ctx *ww)
+{
+ int err;
+
+ err = intel_ring_pin(ring, ww);
+ if (err)
+ return err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ goto err_pin;
+
+ return 0;
+
+err_pin:
+ intel_ring_unpin(ring);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ i915_active_release(&ring->vma->active);
+ intel_ring_unpin(ring);
+}
+
+static int intel_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ int err;
+
+ CE_TRACE(ce, "active\n");
+
+ err = __ring_active(ce->ring, ww);
+ if (err)
+ return err;
+
+ err = intel_timeline_pin(ce->timeline, ww);
+ if (err)
+ goto err_ring;
+
+ if (!ce->state)
+ return 0;
+
+ err = __context_pin_state(ce->state, ww);
+ if (err)
+ goto err_timeline;
+
+ return 0;
+
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
+err_ring:
+ __ring_retire(ce->ring);
+ return err;
+}
+
+static void intel_context_post_unpin(struct intel_context *ce)
+{
+ if (ce->state)
+ __context_unpin_state(ce->state);
+
+ intel_timeline_unpin(ce->timeline);
+ __ring_retire(ce->ring);
+}
+
+int __intel_context_do_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ bool handoff = false;
+ void *vaddr;
+ int err = 0;
+
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = intel_context_alloc_state(ce);
+ if (err)
+ return err;
+ }
+
+ /*
+ * We always pin the context/ring/timeline here, to ensure a pin
+ * refcount for __intel_context_active(), which prevents a lock
+ * inversion of ce->pin_mutex vs dma_resv_lock().
+ */
+
+ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
+ if (!err)
+ err = i915_gem_object_lock(ce->ring->vma->obj, ww);
+ if (!err && ce->state)
+ err = i915_gem_object_lock(ce->state->obj, ww);
+ if (!err)
+ err = intel_context_pre_pin(ce, ww);
+ if (err)
+ return err;
+
+ err = ce->ops->pre_pin(ce, ww, &vaddr);
+ if (err)
+ goto err_ctx_unpin;
+
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ goto err_post_unpin;
+
+ err = mutex_lock_interruptible(&ce->pin_mutex);
+ if (err)
+ goto err_release;
+
+ intel_engine_pm_might_get(ce->engine);
+
+ if (unlikely(intel_context_is_closed(ce))) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
+ err = intel_context_active_acquire(ce);
+ if (unlikely(err))
+ goto err_unlock;
+
+ err = ce->ops->pin(ce, vaddr);
+ if (err) {
+ intel_context_active_release(ce);
+ goto err_unlock;
+ }
+
+ CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+ i915_ggtt_offset(ce->ring->vma),
+ ce->ring->head, ce->ring->tail);
+
+ handoff = true;
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ atomic_inc(&ce->pin_count);
+ }
+
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ trace_intel_context_do_pin(ce);
+
+err_unlock:
+ mutex_unlock(&ce->pin_mutex);
+err_release:
+ i915_active_release(&ce->active);
+err_post_unpin:
+ if (!handoff)
+ ce->ops->post_unpin(ce);
+err_ctx_unpin:
+ intel_context_post_unpin(ce);
+
+ /*
+ * Unlock the hwsp_ggtt object since it's shared.
+ * In principle we can unlock all the global state locked above
+ * since it's pinned and doesn't need fencing, and will
+ * thus remain resident until it is explicitly unpinned.
+ */
+ i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
+
+ return err;
+}
+
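+/*
+ * Convenience wrapper around __intel_context_do_pin_ww() that supplies its
+ * own ww acquire context and transparently retries on -EDEADLK backoff.
+ */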
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = __intel_context_do_pin_ww(ce, &ww);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
+}
+
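+/*
+ * Drop @sub pin references at once; the context state is only released
+ * when the pin count reaches zero.
+ */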
+void __intel_context_do_unpin(struct intel_context *ce, int sub)
+{
+ if (!atomic_sub_and_test(sub, &ce->pin_count))
+ return;
+
+ CE_TRACE(ce, "unpin\n");
+ ce->ops->unpin(ce);
+ ce->ops->post_unpin(ce);
+
+ /*
+ * Once released, we may asynchronously drop the active reference.
+ * As that may be the only reference keeping the context alive,
+ * take an extra now so that it is not freed before we finish
+ * dereferencing it.
+ */
+ intel_context_get(ce);
+ intel_context_active_release(ce);
+ trace_intel_context_do_unpin(ce);
+ intel_context_put(ce);
+}
+
+static void __intel_context_retire(struct i915_active *active)
+{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
+
+ CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
+
+ set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ intel_context_post_unpin(ce);
+ intel_context_put(ce);
+}
+
+static int __intel_context_active(struct i915_active *active)
+{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
+
+ intel_context_get(ce);
+
+ /* everything should already be activated by intel_context_pre_pin() */
+ GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
+ __intel_ring_pin(ce->ring);
+
+ __intel_timeline_pin(ce->timeline);
+
+ if (ce->state) {
+ GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
+ __i915_vma_pin(ce->state);
+ i915_vma_make_unshrinkable(ce->state);
+ }
+
+ return 0;
+}
+
+static int
+sw_fence_dummy_notify(struct i915_sw_fence *sf,
+ enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+void
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->cops);
+ GEM_BUG_ON(!engine->gt->vm);
+
+ kref_init(&ce->ref);
+
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+ ce->ring = NULL;
+ ce->ring_size = SZ_4K;
+
+ ewma_runtime_init(&ce->stats.runtime.avg);
+
+ ce->vm = i915_vm_get(engine->gt->vm);
+
+ /* NB ce->signal_link/lock is used under RCU */
+ spin_lock_init(&ce->signal_lock);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ spin_lock_init(&ce->guc_state.lock);
+ INIT_LIST_HEAD(&ce->guc_state.fences);
+ INIT_LIST_HEAD(&ce->guc_state.requests);
+
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
+ INIT_LIST_HEAD(&ce->guc_id.link);
+
+ INIT_LIST_HEAD(&ce->destroyed_link);
+
+ INIT_LIST_HEAD(&ce->parallel.child_list);
+
+ /*
+ * Initialize the fence as complete, since it is expected to be
+ * complete unless there is a pending schedule disable outstanding.
+ */
+ i915_sw_fence_init(&ce->guc_state.blocked,
+ sw_fence_dummy_notify);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
+
+ i915_active_init(&ce->active,
+ __intel_context_active, __intel_context_retire, 0);
+}
+
+void intel_context_fini(struct intel_context *ce)
+{
+ struct intel_context *child, *next;
+
+ if (ce->timeline)
+ intel_timeline_put(ce->timeline);
+ i915_vm_put(ce->vm);
+
+ /* Need to put the creation ref for the children */
+ if (intel_context_is_parent(ce))
+ for_each_child_safe(ce, child, next)
+ intel_context_put(child);
+
+ mutex_destroy(&ce->pin_mutex);
+ i915_active_fini(&ce->active);
+ i915_sw_fence_fini(&ce->guc_state.blocked);
+}
+
+void i915_context_module_exit(void)
+{
+ kmem_cache_destroy(slab_ce);
+}
+
+int __init i915_context_module_init(void)
+{
+ slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!slab_ce)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void intel_context_enter_engine(struct intel_context *ce)
+{
+ intel_engine_pm_get(ce->engine);
+ intel_timeline_enter(ce->timeline);
+}
+
+void intel_context_exit_engine(struct intel_context *ce)
+{
+ intel_timeline_exit(ce->timeline);
+ intel_engine_pm_put(ce->engine);
+}
+
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ /* Only suitable for use in remotely modifying this context */
+ GEM_BUG_ON(rq->context == ce);
+
+ if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
+ /* Queue this switch after current activity by this context. */
+ err = i915_active_fence_set(&tl->last_request, rq);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Guarantee that the context image and the timeline remain pinned until
+ * the modifying request is retired, by setting the ce activity tracker.
+ *
+ * But we only need to take one pin on account of it. Or, in other
+ * words, transfer the pinned ce object to the tracked active request.
+ */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ return i915_active_add_request(&ce->active, rq);
+}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_request *rq;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = intel_context_pin_ww(ce, &ww);
+ if (!err) {
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+ } else if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ rq = ERR_PTR(err);
+ } else {
+ rq = ERR_PTR(err);
+ }
+
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (IS_ERR(rq))
+ return rq;
+
+ /*
+ * timeline->mutex should be the inner lock, but is used as outer lock.
+ * Hack around this to shut up lockdep in selftests.
+ */
+ lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
+ mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
+ mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+ rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
+
+ return rq;
+}
+
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
+{
+ struct intel_context *parent = intel_context_to_parent(ce);
+ struct i915_request *rq, *active = NULL;
+ unsigned long flags;
+
+ GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
+
+ /*
+ * We search the parent list to find an active request on the submitted
+ * context. The parent list contains the requests for all the contexts
+ * in the relationship so we have to do a compare of each request's
+ * context.
+ */
+ spin_lock_irqsave(&parent->guc_state.lock, flags);
+ list_for_each_entry_reverse(rq, &parent->guc_state.requests,
+ sched.link) {
+ if (rq->context != ce)
+ continue;
+ if (i915_request_completed(rq))
+ break;
+
+ active = rq;
+ }
+ if (active)
+ active = i915_request_get_rcu(active);
+ spin_unlock_irqrestore(&parent->guc_state.lock, flags);
+
+ return active;
+}
+
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child)
+{
+ /*
+ * It is the caller's responsibility to validate that this function is
+ * used correctly, but we use GEM_BUG_ON here to ensure that they do.
+ */
+ GEM_BUG_ON(intel_context_is_pinned(parent));
+ GEM_BUG_ON(intel_context_is_child(parent));
+ GEM_BUG_ON(intel_context_is_pinned(child));
+ GEM_BUG_ON(intel_context_is_child(child));
+ GEM_BUG_ON(intel_context_is_parent(child));
+
+ parent->parallel.child_index = parent->parallel.number_children++;
+ list_add_tail(&child->parallel.child_link,
+ &parent->parallel.child_list);
+ child->parallel.parent = parent;
+}
+
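+/*
+ * Report the accumulated runtime plus any currently active period. When the
+ * backend accounts time in CS cycles (COPS_RUNTIME_CYCLES), the total is
+ * scaled to nanoseconds using the GT clock period.
+ */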
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
+{
+ u64 total, active;
+
+ total = ce->stats.runtime.total;
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ total *= ce->engine->gt->clock_period_ns;
+
+ active = READ_ONCE(ce->stats.active);
+ if (active)
+ active = intel_context_clock() - active;
+
+ return total + active;
+}
+
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
+
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ avg *= ce->engine->gt->clock_period_ns;
+
+ return avg;
+}
+
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
+{
+ bool ret = intel_context_set_banned(ce);
+
+ trace_intel_context_ban(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, rq,
+ INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);
+
+ return ret;
+}
+
+bool intel_context_revoke(struct intel_context *ce)
+{
+ bool ret = intel_context_set_exiting(ce);
+
+ if (ce->ops->revoke)
+ ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
new file mode 100644
index 000000000..4ab6c8ddd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "i915_drv.h"
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+#include "intel_ring_types.h"
+#include "intel_timeline_types.h"
+#include "i915_trace.h"
+
+#define CE_TRACE(ce, fmt, ...) do { \
+ const struct intel_context *ce__ = (ce); \
+ ENGINE_TRACE(ce__->engine, "context:%llx " fmt, \
+ ce__->timeline->fence_context, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)
+
+struct i915_gem_ww_ctx;
+
+void intel_context_init(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void intel_context_fini(struct intel_context *ce);
+
+void i915_context_module_exit(void);
+int i915_context_module_init(void);
+
+struct intel_context *
+intel_context_create(struct intel_engine_cs *engine);
+
+int intel_context_alloc_state(struct intel_context *ce);
+
+void intel_context_free(struct intel_context *ce);
+
+int intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu);
+
+#define PARENT_SCRATCH_SIZE PAGE_SIZE
+
+static inline bool intel_context_is_child(struct intel_context *ce)
+{
+ return !!ce->parallel.parent;
+}
+
+static inline bool intel_context_is_parent(struct intel_context *ce)
+{
+ return !!ce->parallel.number_children;
+}
+
+static inline bool intel_context_is_pinned(struct intel_context *ce);
+
+static inline struct intel_context *
+intel_context_to_parent(struct intel_context *ce)
+{
+ if (intel_context_is_child(ce)) {
+ /*
+ * The parent holds a reference to the child, so it is always safe
+ * for the parent to access the child, but the child has a
+ * pointer to the parent without a ref. To ensure this is safe
+ * the child should only access the parent pointer while the
+ * parent is pinned.
+ */
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+
+ return ce->parallel.parent;
+ } else {
+ return ce;
+ }
+}
+
+static inline bool intel_context_is_parallel(struct intel_context *ce)
+{
+ return intel_context_is_child(ce) || intel_context_is_parent(ce);
+}
+
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child);
+
+#define for_each_child(parent, ce)\
+ list_for_each_entry(ce, &(parent)->parallel.child_list,\
+ parallel.child_link)
+#define for_each_child_safe(parent, ce, cn)\
+ list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
+ parallel.child_link)
+
+/**
+ * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
+ * @ce: the context
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+static inline int intel_context_lock_pinned(struct intel_context *ce)
+ __acquires(ce->pin_mutex)
+{
+ return mutex_lock_interruptible(&ce->pin_mutex);
+}
+
+/**
+ * intel_context_is_pinned - Reports the 'pinned' status
+ * @ce: the context
+ *
+ * While in use by the GPU, the context, along with its ring and page
+ * tables is pinned into memory and the GTT.
+ *
+ * Returns: true if the context is currently pinned for use by the GPU.
+ */
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+static inline void intel_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!ce->ops->cancel_request);
+ return ce->ops->cancel_request(ce, rq);
+}
+
+/**
+ * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
+ * @ce: the context
+ *
+ * Releases the lock earlier acquired by intel_context_lock_pinned().
+ */
+static inline void intel_context_unlock_pinned(struct intel_context *ce)
+ __releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
+
+int __intel_context_do_pin(struct intel_context *ce);
+int __intel_context_do_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww);
+
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+ return atomic_inc_not_zero(&ce->pin_count);
+}
+
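+/*
+ * Take a pin on the context. The first pin makes the backing state (ring,
+ * timeline and, if present, the context image) ready for use by the GPU;
+ * later pins only increment the pin count. Every successful pin must be
+ * balanced by a call to intel_context_unpin().
+ */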
+static inline int intel_context_pin(struct intel_context *ce)
+{
+ if (likely(intel_context_pin_if_active(ce)))
+ return 0;
+
+ return __intel_context_do_pin(ce);
+}
+
+static inline int intel_context_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (likely(intel_context_pin_if_active(ce)))
+ return 0;
+
+ return __intel_context_do_pin_ww(ce, ww);
+}
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void __intel_context_do_unpin(struct intel_context *ce, int sub);
+
+static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
+{
+ __intel_context_do_unpin(ce, 2);
+}
+
+static inline void intel_context_unpin(struct intel_context *ce)
+{
+ if (!ce->ops->sched_disable) {
+ __intel_context_do_unpin(ce, 1);
+ } else {
+ /*
+ * Move ownership of this pin to the scheduling disable, which is
+ * an async operation. When that operation completes, the above
+ * intel_context_sched_disable_unpin() is called, potentially
+ * unpinning the context.
+ */
+ while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
+ if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
+ ce->ops->sched_disable(ce);
+ break;
+ }
+ }
+ }
+}
+
+void intel_context_enter_engine(struct intel_context *ce);
+void intel_context_exit_engine(struct intel_context *ce);
+
+static inline void intel_context_enter(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->timeline->mutex);
+ if (!ce->active_count++)
+ ce->ops->enter(ce);
+}
+
+static inline void intel_context_mark_active(struct intel_context *ce)
+{
+ lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
+ test_bit(CONTEXT_IS_PARKING, &ce->flags));
+ ++ce->active_count;
+}
+
+static inline void intel_context_exit(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->timeline->mutex);
+ GEM_BUG_ON(!ce->active_count);
+ if (!--ce->active_count)
+ ce->ops->exit(ce);
+}
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
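+/*
+ * Lock the context's timeline. Parent and child contexts of a parallel
+ * group use distinct lockdep subclasses (the parent at level 0, each child
+ * at child_index + 1) so that lockdep can tell the timelines apart when
+ * several of them are held at once.
+ */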
+static inline struct intel_timeline *__must_check
+intel_context_timeline_lock(struct intel_context *ce)
+ __acquires(&ce->timeline->mutex)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ if (intel_context_is_parent(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex, 0);
+ else if (intel_context_is_child(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex,
+ ce->parallel.child_index + 1);
+ else
+ err = mutex_lock_interruptible(&tl->mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ return tl;
+}
+
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+ __releases(&tl->mutex)
+{
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq);
+
+struct i915_request *intel_context_create_request(struct intel_context *ce);
+
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
+
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
+static inline bool intel_context_has_inflight(const struct intel_context *ce)
+{
+ return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
+}
+
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+ set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+static inline bool intel_context_is_schedulable(const struct intel_context *ce)
+{
+ return !test_bit(CONTEXT_EXITING, &ce->flags) &&
+ !test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_is_exiting(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_EXITING, &ce->flags);
+}
+
+static inline bool intel_context_set_exiting(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
+}
+
+bool intel_context_revoke(struct intel_context *ce);
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
+
+static inline u64 intel_context_clock(void)
+{
+ /* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
+ return ktime_get_raw_fast_ns();
+}
+
+#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000..0c69cb42d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+#include <linux/types.h>
+
+#include "intel_context.h"
+
+static inline void
+intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
+{
+ ce->watchdog.timeout_us = timeout_us;
+}
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
new file mode 100644
index 000000000..ece16c2b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "intel_sseu.h"
+
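+/*
+ * Emit an MI_STORE_DWORD_IMM that writes the new RPCS (render power clock
+ * state) value straight into the saved context image at its GGTT address,
+ * so it is picked up the next time the context is restored.
+ */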
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+ const struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ u64 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) +
+ LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
+{
+ struct i915_request *rq;
+ int ret;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_pin_if_active(ce))
+ return 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
+
+ i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
+ return ret;
+}
+
+int
+intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ int ret;
+
+ GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
+
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
+
+ /* Nothing to do if unmodified. */
+ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+ goto unlock;
+
+ ret = gen8_modify_rpcs(ce, sseu);
+ if (!ret)
+ ce->sseu = sseu;
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
new file mode 100644
index 000000000..04eacae1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_TYPES__
+#define __INTEL_CONTEXT_TYPES__
+
+#include <linux/average.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+#include "i915_sw_fence.h"
+#include "i915_utils.h"
+#include "intel_engine_types.h"
+#include "intel_sseu.h"
+
+#include "uc/intel_guc_fwif.h"
+
+#define CONTEXT_REDZONE POISON_INUSE
+DECLARE_EWMA(runtime, 3, 8);
+
+struct i915_gem_context;
+struct i915_gem_ww_ctx;
+struct i915_vma;
+struct intel_breadcrumbs;
+struct intel_context;
+struct intel_ring;
+
+struct intel_context_ops {
+ unsigned long flags;
+#define COPS_HAS_INFLIGHT_BIT 0
+#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+
+#define COPS_RUNTIME_CYCLES_BIT 1
+#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)
+
+ int (*alloc)(struct intel_context *ce);
+
+ void (*revoke)(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms);
+
+ int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
+ int (*pin)(struct intel_context *ce, void *vaddr);
+ void (*unpin)(struct intel_context *ce);
+ void (*post_unpin)(struct intel_context *ce);
+
+ void (*cancel_request)(struct intel_context *ce,
+ struct i915_request *rq);
+
+ void (*enter)(struct intel_context *ce);
+ void (*exit)(struct intel_context *ce);
+
+ void (*sched_disable)(struct intel_context *ce);
+
+ void (*reset)(struct intel_context *ce);
+ void (*destroy)(struct kref *kref);
+
+ /* virtual/parallel engine/context interface */
+ struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
+ unsigned int count,
+ unsigned long flags);
+ struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
+ struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
+ unsigned int sibling);
+};
+
+struct intel_context {
+ /*
+ * Note: Some fields may be accessed under RCU.
+ *
+ * Unless otherwise noted a field can safely be assumed to be protected
+ * by strong reference counting.
+ */
+ union {
+ struct kref ref; /* no kref_get_unless_zero()! */
+ struct rcu_head rcu;
+ };
+
+ struct intel_engine_cs *engine;
+ struct intel_engine_cs *inflight;
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+ __intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+ __intel_context_inflight_count(READ_ONCE((ce)->inflight))
+
+ struct i915_address_space *vm;
+ struct i915_gem_context __rcu *gem_context;
+
+ /*
+ * @signal_lock protects the list of requests that need signaling,
+ * @signals. While there are any requests that need signaling,
+ * we add the context to the breadcrumbs worker, and remove it
+ * upon completion/cancellation of the last request.
+ */
+ struct list_head signal_link; /* Accessed under RCU */
+ struct list_head signals; /* Guarded by signal_lock */
+ spinlock_t signal_lock; /* protects signals, the list of requests */
+
+ struct i915_vma *state;
+ u32 ring_size;
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+
+ unsigned long flags;
+#define CONTEXT_BARRIER_BIT 0
+#define CONTEXT_ALLOC_BIT 1
+#define CONTEXT_INIT_BIT 2
+#define CONTEXT_VALID_BIT 3
+#define CONTEXT_CLOSED_BIT 4
+#define CONTEXT_USE_SEMAPHORES 5
+#define CONTEXT_BANNED 6
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
+#define CONTEXT_NOPREEMPT 8
+#define CONTEXT_LRCA_DIRTY 9
+#define CONTEXT_GUC_INIT 10
+#define CONTEXT_PERMA_PIN 11
+#define CONTEXT_IS_PARKING 12
+#define CONTEXT_EXITING 13
+
+ struct {
+ u64 timeout_us;
+ } watchdog;
+
+ u32 *lrc_reg_state;
+ union {
+ struct {
+ u32 lrca;
+ u32 ccid;
+ };
+ u64 desc;
+ } lrc;
+ u32 tag; /* cookie passed to HW to track this context on submission */
+
+ /** stats: Context GPU engine busyness tracking. */
+ struct intel_context_stats {
+ u64 active;
+
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+ } stats;
+
+ unsigned int active_count; /* protected by timeline->mutex */
+
+ atomic_t pin_count;
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+
+ /**
+ * active: Active tracker for the rq activity (inc. external) on this
+ * intel_context object.
+ */
+ struct i915_active active;
+
+ const struct intel_context_ops *ops;
+
+ /** sseu: Control eu/slice partitioning */
+ struct intel_sseu sseu;
+
+ /**
+ * pinned_contexts_link: List link for the engine's pinned contexts.
+ * This is only used if this is a perma-pinned kernel context and
+ * the list is assumed to only be manipulated during driver load
+ * or unload time, so there is currently no mutex protection.
+ */
+ struct list_head pinned_contexts_link;
+
+ u8 wa_bb_page; /* if set, page num reserved for context workarounds */
+
+ struct {
+ /** @lock: protects everything in guc_state */
+ spinlock_t lock;
+ /**
+ * @sched_state: scheduling state of this context using GuC
+ * submission
+ */
+ u32 sched_state;
+ /*
+ * @fences: maintains a list of requests that are currently
+ * being fenced until a GuC operation completes
+ */
+ struct list_head fences;
+ /**
+ * @blocked: fence used to signal when the blocking of a
+ * context's submissions is complete.
+ */
+ struct i915_sw_fence blocked;
+ /** @number_committed_requests: number of committed requests */
+ int number_committed_requests;
+ /** @requests: list of active requests on this context */
+ struct list_head requests;
+ /** @prio: the context's current guc priority */
+ u8 prio;
+ /**
+ * @prio_count: a counter of the number of requests in flight in
+ * each priority bucket
+ */
+ u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
+ } guc_state;
+
+ struct {
+ /**
+ * @id: handle which is used to uniquely identify this context
+ * with the GuC, protected by guc->submission_state.lock
+ */
+ u16 id;
+ /**
+ * @ref: the number of references to the guc_id, when
+ * transitioning in and out of zero protected by
+ * guc->submission_state.lock
+ */
+ atomic_t ref;
+ /**
+ * @link: in guc->guc_id_list when the guc_id has no refs but is
+ * still valid, protected by guc->submission_state.lock
+ */
+ struct list_head link;
+ } guc_id;
+
+ /**
+ * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
+ * list when context is pending to be destroyed (deregistered with the
+ * GuC), protected by guc->submission_state.lock
+ */
+ struct list_head destroyed_link;
+
+ /** @parallel: sub-structure for parallel submission members */
+ struct {
+ union {
+ /**
+ * @child_list: parent's list of children
+ * contexts, no protection as immutable after context
+ * creation
+ */
+ struct list_head child_list;
+ /**
+ * @child_link: child's link into parent's list of
+ * children
+ */
+ struct list_head child_link;
+ };
+ /** @parent: pointer to parent if child */
+ struct intel_context *parent;
+ /**
+ * @last_rq: last request submitted on a parallel context, used
+ * to insert submit fences between requests in the parallel
+ * context
+ */
+ struct i915_request *last_rq;
+ /**
+ * @fence_context: fence context composite fence when doing
+ * parallel submission
+ */
+ u64 fence_context;
+ /**
+ * @seqno: seqno for composite fence when doing parallel
+ * submission
+ */
+ u32 seqno;
+ /** @number_children: number of children if parent */
+ u8 number_children;
+ /** @child_index: index into child_list if child */
+ u8 child_index;
+ /** @guc: GuC specific members for parallel submission */
+ struct {
+ /** @wqi_head: cached head pointer in work queue */
+ u16 wqi_head;
+ /** @wqi_tail: cached tail pointer in work queue */
+ u16 wqi_tail;
+ /** @wq_head: pointer to the actual head in work queue */
+ u32 *wq_head;
+ /** @wq_tail: pointer to the actual tail in work queue */
+ u32 *wq_tail;
+ /** @wq_status: pointer to the status in work queue */
+ u32 *wq_status;
+
+ /**
+ * @parent_page: page in context state (ce->state) used
+ * by parent for work queue, process descriptor
+ */
+ u8 parent_page;
+ } guc;
+ } parallel;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
+ */
+ bool drop_schedule_enable;
+
+ /**
+ * @drop_schedule_disable: Force drop of schedule disable G2H for
+ * selftest
+ */
+ bool drop_schedule_disable;
+
+ /**
+ * @drop_deregister: Force drop of deregister G2H for selftest
+ */
+ bool drop_deregister;
+#endif
+};
+
+#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
new file mode 100644
index 000000000..7a4504ea3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+#include <asm/cacheflush.h>
+#include <drm/drm_util.h>
+#include <drm/drm_cache.h>
+
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/random.h>
+#include <linux/seqlock.h>
+
+#include "i915_pmu.h"
+#include "i915_request.h"
+#include "i915_selftest.h"
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_timeline.h"
+#include "intel_workarounds.h"
+
+struct drm_printer;
+struct intel_context;
+struct intel_gt;
+struct lock_class_key;
+
+/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+
+#define ENGINE_TRACE(e, fmt, ...) do { \
+ const struct intel_engine_cs *e__ __maybe_unused = (e); \
+ GEM_TRACE("%s %s: " fmt, \
+ dev_name(e__->i915->drm.dev), e__->name, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
+
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+ intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
+
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
+
+#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
+
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+ __ENGINE_REG_OP(read64_2x32, (engine__), \
+ lower_reg__((engine__)->mmio_base), \
+ upper_reg__((engine__)->mmio_base))
+
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+ __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
+
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
+
+#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+
+#define GEN6_RING_FAULT_REG_READ(engine__) \
+ intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
+ intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
+({ \
+ u32 __val; \
+\
+ __val = intel_uncore_read((engine__)->uncore, \
+ RING_FAULT_REG(engine__)); \
+ __val &= ~(clear__); \
+ __val |= (set__); \
+ intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
+ __val); \
+})
+
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
+{
+ return execlists->port_mask + 1;
+}
+
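+/*
+ * Sample the request currently at the head of the execlists ports. The
+ * active pointer may be rewritten by process_csb() underneath us, so keep
+ * re-reading (a seqlock-style retry) until we obtain a stable snapshot.
+ */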
+static inline struct i915_request *
+execlists_active(const struct intel_engine_execlists *execlists)
+{
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
+static inline u32
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
+{
+ /* Ensure that the compiler doesn't optimize away the load. */
+ return READ_ONCE(engine->status_page.addr[reg]);
+}
+
+static inline void
+intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
+{
+ /* Writing into the status page should be done sparingly. Since
+ * we do so when we are uncertain of the device state, we take a bit
+ * of extra paranoia to try and ensure that the HWS takes the value
+ * we give and that it doesn't end up trapped inside the CPU!
+ */
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
+ WRITE_ONCE(engine->status_page.addr[reg], value);
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
+}
+
+/*
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
+ *
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_PREEMPT 0x32
+#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_SEQNO 0x40
+#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
+#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_PXP 0x60
+#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
+#define I915_GEM_HWS_SCRATCH 0x80
+
+#define I915_HWS_CSB_BUF0_INDEX 0x10
+#define I915_HWS_CSB_WRITE_INDEX 0x1f
+#define ICL_HWS_CSB_WRITE_INDEX 0x2f
+#define INTEL_HWS_CSB_WRITE_INDEX(__i915) \
+ (GRAPHICS_VER(__i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : I915_HWS_CSB_WRITE_INDEX)
+
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
+
+int intel_engines_init_mmio(struct intel_gt *gt);
+int intel_engines_init(struct intel_gt *gt);
+
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
+void intel_engines_release(struct intel_gt *gt);
+void intel_engines_free(struct intel_gt *gt);
+
+int intel_engine_init_common(struct intel_engine_cs *engine);
+void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+
+int intel_engine_resume(struct intel_engine_cs *engine);
+
+int intel_ring_submission_setup(struct intel_engine_cs *engine);
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
+
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine);
+
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
+
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
+
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
+ struct intel_instdone *instdone);
+
+void intel_engine_init_execlists(struct intel_engine_cs *engine);
+
+bool intel_engine_irq_enable(struct intel_engine_cs *engine);
+void intel_engine_irq_disable(struct intel_engine_cs *engine);
+
+static inline void __intel_engine_reset(struct intel_engine_cs *engine,
+ bool stalled)
+{
+ if (engine->reset.rewind)
+ engine->reset.rewind(engine, stalled);
+ engine->serial++; /* contexts lost */
+}
+
+bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
+static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ __intel_engine_flush_submission(engine, true);
+}
+
+void intel_engines_reset_default_submission(struct intel_gt *gt);
+
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
+
+__printf(3, 4)
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...);
+void intel_engine_dump_active_requests(struct list_head *requests,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ ktime_t *now);
+
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq);
+
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
+struct intel_context *
+intel_engine_create_pinned_context(struct intel_engine_cs *engine,
+ struct i915_address_space *vm,
+ unsigned int ring_size,
+ unsigned int hwsp,
+ struct lock_class_key *key,
+ const char *name);
+
+void intel_engine_destroy_pinned_context(struct intel_context *ce);
+
+void xehp_enable_ccs_engines(struct intel_engine_cs *engine);
+
+#define ENGINE_PHYSICAL 0
+#define ENGINE_MOCK 1
+#define ENGINE_VIRTUAL 2
+
+static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
+{
+ return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
+}
+
+static inline bool
+intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+{
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ return intel_engine_has_preemption(engine);
+}
+
+#define FORCE_VIRTUAL BIT(0)
+struct intel_context *
+intel_engine_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count, unsigned long flags);
+
+static inline struct intel_context *
+intel_engine_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_engines,
+ unsigned int width)
+{
+ GEM_BUG_ON(!engines[0]->cops->create_parallel);
+ return engines[0]->cops->create_parallel(engines, num_engines, width);
+}
+
+static inline bool
+intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+ /*
+ * For non-GuC submission we expect the back-end to look at the
+ * heartbeat status of the actual physical engine that the work
+ * has been (or is being) scheduled on, so we should only reach
+ * here with GuC submission enabled.
+ */
+ GEM_BUG_ON(!intel_engine_uses_guc(engine));
+
+ return intel_guc_virtual_engine_has_heartbeat(engine);
+}
+
+static inline bool
+intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return false;
+
+ if (intel_engine_is_virtual(engine))
+ return intel_virtual_engine_has_heartbeat(engine);
+ else
+ return READ_ONCE(engine->props.heartbeat_interval_ms);
+}
+
+static inline struct intel_engine_cs *
+intel_engine_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return engine->cops->get_sibling(engine, sibling);
+}
+
+static inline void
+intel_engine_set_hung_context(struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ engine->hung_ce = ce;
+}
+
+static inline void
+intel_engine_clear_hung_context(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hung_context(engine, NULL);
+}
+
+static inline struct intel_context *
+intel_engine_get_hung_context(struct intel_engine_cs *engine)
+{
+ return engine->hung_ce;
+}
+
+u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value);
+u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value);
+u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
+u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
+u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
new file mode 100644
index 000000000..07967adce
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -0,0 +1,2392 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+#include "gt/intel_gt_regs.h"
+
+#include "i915_cmd_parser.h"
+#include "i915_drv.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_engine_user.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "uc/intel_guc_submission.h"
+
+/* Haswell does have the CXT_SIZE register; however, it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
+
+#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+
+#define MAX_MMIO_BASES 3
+struct engine_info {
+ u8 class;
+ u8 instance;
+ /* mmio bases table *must* be sorted in reverse graphics_ver order */
+ struct engine_mmio_base {
+ u32 graphics_ver : 8;
+ u32 base : 24;
+ } mmio_bases[MAX_MMIO_BASES];
+};
+
+static const struct engine_info intel_engines[] = {
+ [RCS0] = {
+ .class = RENDER_CLASS,
+ .instance = 0,
+ .mmio_bases = {
+ { .graphics_ver = 1, .base = RENDER_RING_BASE }
+ },
+ },
+ [BCS0] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 0,
+ .mmio_bases = {
+ { .graphics_ver = 6, .base = BLT_RING_BASE }
+ },
+ },
+ [BCS1] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
+ },
+ },
+ [BCS2] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
+ },
+ },
+ [BCS3] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
+ },
+ },
+ [BCS4] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 4,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
+ },
+ },
+ [BCS5] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 5,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
+ },
+ },
+ [BCS6] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 6,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
+ },
+ },
+ [BCS7] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 7,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
+ },
+ },
+ [BCS8] = {
+ .class = COPY_ENGINE_CLASS,
+ .instance = 8,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
+ },
+ },
+ [VCS0] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 0,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
+ { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
+ { .graphics_ver = 4, .base = BSD_RING_BASE }
+ },
+ },
+ [VCS1] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
+ { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
+ },
+ },
+ [VCS2] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
+ },
+ },
+ [VCS3] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
+ },
+ },
+ [VCS4] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 4,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
+ },
+ },
+ [VCS5] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 5,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
+ },
+ },
+ [VCS6] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 6,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
+ },
+ },
+ [VCS7] = {
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 7,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
+ },
+ },
+ [VECS0] = {
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 0,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
+ { .graphics_ver = 7, .base = VEBOX_RING_BASE }
+ },
+ },
+ [VECS1] = {
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
+ },
+ },
+ [VECS2] = {
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
+ },
+ },
+ [VECS3] = {
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
+ },
+ },
+ [CCS0] = {
+ .class = COMPUTE_CLASS,
+ .instance = 0,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE }
+ }
+ },
+ [CCS1] = {
+ .class = COMPUTE_CLASS,
+ .instance = 1,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE }
+ }
+ },
+ [CCS2] = {
+ .class = COMPUTE_CLASS,
+ .instance = 2,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE }
+ }
+ },
+ [CCS3] = {
+ .class = COMPUTE_CLASS,
+ .instance = 3,
+ .mmio_bases = {
+ { .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE }
+ }
+ },
+};
+
+/**
+ * intel_engine_context_size() - return the size of the context for an engine
+ * @gt: the gt
+ * @class: engine class
+ *
+ * Each engine class may require a different amount of space for a context
+ * image.
+ *
+ * Return: size (in bytes) of an engine class specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 cxt_size;
+
+ BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
+
+ switch (class) {
+ case COMPUTE_CLASS:
+ fallthrough;
+ case RENDER_CLASS:
+ switch (GRAPHICS_VER(gt->i915)) {
+ default:
+ MISSING_CASE(GRAPHICS_VER(gt->i915));
+ return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+ case 12:
+ case 11:
+ return GEN11_LR_CONTEXT_RENDER_SIZE;
+ case 9:
+ return GEN9_LR_CONTEXT_RENDER_SIZE;
+ case 8:
+ return GEN8_LR_CONTEXT_RENDER_SIZE;
+ case 7:
+ if (IS_HASWELL(gt->i915))
+ return HSW_CXT_TOTAL_SIZE;
+
+ cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
+ return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
+ PAGE_SIZE);
+ case 6:
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE);
+ return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
+ PAGE_SIZE);
+ case 5:
+ case 4:
+ /*
+ * There is a discrepancy here between the size reported
+ * by the register and the size of the context layout
+ * in the docs. Both are described as authoritative!
+ *
+ * The discrepancy is on the order of a few cachelines,
+ * but the total is under one page (4k), which is our
+ * minimum allocation anyway so it should all come
+ * out in the wash.
+ */
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
+ drm_dbg(&gt->i915->drm,
+ "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
+ GRAPHICS_VER(gt->i915), cxt_size * 64,
+ cxt_size - 1);
+ return round_up(cxt_size * 64, PAGE_SIZE);
+ case 3:
+ case 2:
+ /* For the special day when i810 gets merged. */
+ case 1:
+ return 0;
+ }
+ break;
+ default:
+ MISSING_CASE(class);
+ fallthrough;
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ case COPY_ENGINE_CLASS:
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0;
+ return GEN8_LR_CONTEXT_OTHER_SIZE;
+ }
+}
+
+static u32 __engine_mmio_base(struct drm_i915_private *i915,
+ const struct engine_mmio_base *bases)
+{
+ int i;
+
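+ /*
+ * The mmio_bases[] tables in intel_engines[] are ordered from newest
+ * to oldest graphics version, so the first entry that the running
+ * platform satisfies is the one to use.
+ */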
+ for (i = 0; i < MAX_MMIO_BASES; i++)
+ if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
+ break;
+
+ GEM_BUG_ON(i == MAX_MMIO_BASES);
+ GEM_BUG_ON(!bases[i].base);
+
+ return bases[i].base;
+}
+
+static void __sprint_engine_name(struct intel_engine_cs *engine)
+{
+ /*
+ * Before we know what the uABI name for this engine will be,
+ * we still would like to keep track of this engine in the debug logs.
+ * We throw in a ' here as a reminder that this isn't its final name.
+ */
+ GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
+ intel_engine_class_repr(engine->class),
+ engine->instance) >= sizeof(engine->name));
+}
+
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
+{
+ /*
+ * Though they added more rings on g4x/ilk, they did not add
+ * per-engine HWSTAM until gen6.
+ */
+ if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
+ return;
+
+ if (GRAPHICS_VER(engine->i915) >= 3)
+ ENGINE_WRITE(engine, RING_HWSTAM, mask);
+ else
+ ENGINE_WRITE16(engine, RING_HWSTAM, mask);
+}
+
+static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
+{
+ /* Mask off all writes into the unknown HWSP */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+}
+
+static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ GEM_DEBUG_WARN_ON(iir);
+}
+
+static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
+{
+ u32 reset_domain;
+
+ if (ver >= 11) {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [BCS1] = XEHPC_GRDOM_BLT1,
+ [BCS2] = XEHPC_GRDOM_BLT2,
+ [BCS3] = XEHPC_GRDOM_BLT3,
+ [BCS4] = XEHPC_GRDOM_BLT4,
+ [BCS5] = XEHPC_GRDOM_BLT5,
+ [BCS6] = XEHPC_GRDOM_BLT6,
+ [BCS7] = XEHPC_GRDOM_BLT7,
+ [BCS8] = XEHPC_GRDOM_BLT8,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VCS4] = GEN11_GRDOM_MEDIA5,
+ [VCS5] = GEN11_GRDOM_MEDIA6,
+ [VCS6] = GEN11_GRDOM_MEDIA7,
+ [VCS7] = GEN11_GRDOM_MEDIA8,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
+ [VECS2] = GEN11_GRDOM_VECS3,
+ [VECS3] = GEN11_GRDOM_VECS4,
+ [CCS0] = GEN11_GRDOM_RENDER,
+ [CCS1] = GEN11_GRDOM_RENDER,
+ [CCS2] = GEN11_GRDOM_RENDER,
+ [CCS3] = GEN11_GRDOM_RENDER,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ reset_domain = engine_reset_domains[id];
+ } else {
+ static const u32 engine_reset_domains[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
+ };
+ GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
+ !engine_reset_domains[id]);
+ reset_domain = engine_reset_domains[id];
+ }
+
+ return reset_domain;
+}
+
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ u8 logical_instance)
+{
+ const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine;
+ u8 guc_class;
+
+ BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
+ BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+ BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
+ BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
+
+ if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+ return -EINVAL;
+
+ if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ return -EINVAL;
+
+ if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ return -EINVAL;
+
+ if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
+ return -EINVAL;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
+ engine->id = id;
+ engine->legacy_idx = INVALID_ENGINE;
+ engine->mask = BIT(id);
+ engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
+ id);
+ engine->i915 = i915;
+ engine->gt = gt;
+ engine->uncore = gt->uncore;
+ guc_class = engine_class_to_guc_class(info->class);
+ engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
+
+ engine->irq_handler = nop_irq_handler;
+
+ engine->class = info->class;
+ engine->instance = info->instance;
+ engine->logical_mask = BIT(logical_instance);
+ __sprint_engine_name(engine);
+
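+ /*
+ * The render engine, or the lowest enabled CCS instance when there
+ * is no RCS at all, is marked as the first render/compute engine.
+ */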
+ if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
+ __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
+ engine->class == RENDER_CLASS)
+ engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
+
+ /* features common between engines sharing EUs */
+ if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
+ engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
+ engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
+ }
+
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
+ engine->props.preempt_timeout_ms =
+ CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+ engine->props.stop_timeout_ms =
+ CONFIG_DRM_I915_STOP_TIMEOUT;
+ engine->props.timeslice_duration_ms =
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
+ engine->props.preempt_timeout_ms = 0;
+
+ /* Cap properties according to any system limits */
+#define CLAMP_PROP(field) \
+ do { \
+ u64 clamp = intel_clamp_##field(engine, engine->props.field); \
+ if (clamp != engine->props.field) { \
+ drm_notice(&engine->i915->drm, \
+ "Warning, clamping %s to %lld to prevent overflow\n", \
+ #field, clamp); \
+ engine->props.field = clamp; \
+ } \
+ } while (0)
+
+ CLAMP_PROP(heartbeat_interval_ms);
+ CLAMP_PROP(max_busywait_duration_ns);
+ CLAMP_PROP(preempt_timeout_ms);
+ CLAMP_PROP(stop_timeout_ms);
+ CLAMP_PROP(timeslice_duration_ms);
+
+#undef CLAMP_PROP
+
+ engine->defaults = engine->props; /* never to change again */
+
+ engine->context_size = intel_engine_context_size(gt, engine->class);
+ if (WARN_ON(engine->context_size > BIT(20)))
+ engine->context_size = 0;
+ if (engine->context_size)
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
+
+ ewma__engine_latency_init(&engine->latency);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
+ /* Scrub mmio state on takeover */
+ intel_engine_sanitize_mmio(engine);
+
+ gt->engine_class[info->class][info->instance] = engine;
+ gt->engine[id] = engine;
+
+ return 0;
+}
+
+u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
+{
+ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
+
+ return value;
+}
+
+u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
+{
+ value = min(value, jiffies_to_nsecs(2));
+
+ return value;
+}
+
+u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
+{
+ /*
+ * NB: The GuC API only supports 32bit values. However, the limit is further
+ * reduced due to internal calculations which would otherwise overflow.
+ */
+ if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
+
+ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
+
+ return value;
+}
+
+u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
+{
+ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
+
+ return value;
+}
+
+u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
+{
+ /*
+ * NB: The GuC API only supports 32bit values. However, the limit is further
+ * reduced due to internal calculations which would otherwise overflow.
+ */
+ if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
+ value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
+
+ value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
+
+ return value;
+}
+
+static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (engine->class == VIDEO_DECODE_CLASS) {
+ /*
+ * HEVC support is present on the first engine instance
+ * before Gen11 and on all instances afterwards.
+ */
+ if (GRAPHICS_VER(i915) >= 11 ||
+ (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_CLASS_CAPABILITY_HEVC;
+
+ /*
+ * SFC block is present only on even logical engine
+ * instances.
+ */
+ if ((GRAPHICS_VER(i915) >= 11 &&
+ (engine->gt->info.vdbox_sfc_access &
+ BIT(engine->instance))) ||
+ (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
+ if (GRAPHICS_VER(i915) >= 9 &&
+ engine->gt->info.sfc_mask & BIT(engine->instance))
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ }
+}
+
+static void intel_setup_engine_capabilities(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ __setup_engine_capabilities(engine);
+}
+
+/**
+ * intel_engines_release() - free the resources allocated for Command Streamers
+ * @gt: pointer to struct intel_gt
+ */
+void intel_engines_release(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Before we release the resources held by engine, we must be certain
+ * that the HW is no longer accessing them -- having the GPU scribble
+ * to or read from a page being used for something else causes no end
+ * of fun.
+ *
+ * The GPU should be reset by this point, but assume the worst just
+ * in case we aborted before completely initialising the engines.
+ */
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
+
+ /* Decouple the backend; but keep the layout for late GPU resets */
+ for_each_engine(engine, gt, id) {
+ if (!engine->release)
+ continue;
+
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
+ engine->release(engine);
+ engine->release = NULL;
+
+ memset(&engine->reset, 0, sizeof(engine->reset));
+ }
+}
+
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+ if (!engine->request_pool)
+ return;
+
+ kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
+void intel_engines_free(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Free the requests! dma-resv keeps fences around for an eternity */
+ rcu_barrier();
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_free_request_pool(engine);
+ kfree(engine);
+ gt->engine[id] = NULL;
+ }
+}
+
+static
+bool gen11_vdbox_has_sfc(struct intel_gt *gt,
+ unsigned int physical_vdbox,
+ unsigned int logical_vdbox, u16 vdbox_mask)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /*
+ * In Gen11, only even numbered logical VDBOXes are hooked
+ * up to an SFC (Scaler & Format Converter) unit.
+ * In Gen12, even numbered physical instances are always connected
+ * to an SFC. Odd numbered physical instances have an SFC only if
+ * the previous even instance is fused off.
+ *
+ * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
+ * in the fuse register that tells us whether a specific SFC is present.
+ */
+ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
+ return false;
+ else if (MEDIA_VER(i915) >= 12)
+ return (physical_vdbox % 2 == 0) ||
+ !(BIT(physical_vdbox - 1) & vdbox_mask);
+ else if (MEDIA_VER(i915) == 11)
+ return logical_vdbox % 2 == 0;
+
+ return false;
+}
+
+static void engine_mask_apply_media_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse, fuse1;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ if (MEDIA_VER(gt->i915) < 11)
+ return;
+
+ /*
+ * On newer platforms the fusing register is called 'enable' and has
+ * enable semantics, while on older platforms it is called 'disable'
+ * and bits have disable semantics.
+ */
+ media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ media_fuse = ~media_fuse;
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
+ gt->info.vdbox_sfc_access |= BIT(i);
+ logical_vdbox++;
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+}
+
+static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
+ unsigned long ccs_mask;
+ unsigned int i;
+
+ if (GRAPHICS_VER(i915) < 11)
+ return;
+
+ if (hweight32(CCS_MASK(gt)) <= 1)
+ return;
+
+ ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
+ ss_per_ccs);
+ /*
+ * If all DSS in a quadrant are fused off, the corresponding CCS
+ * engine is not available for use.
+ */
+ for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
+ info->engine_mask &= ~BIT(_CCS(i));
+ drm_dbg(&i915->drm, "ccs%u fused off\n", i);
+ }
+}
+
+static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_gt_info *info = &gt->info;
+ unsigned long meml3_mask;
+ unsigned long quad;
+
+ if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
+ return;
+
+ meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
+ meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
+
+ /*
+ * Link Copy engines may be fused off according to meml3_mask. Each
+ * bit is a quad that houses two Link Copy and two Sub Copy engines.
+ */
+ for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
+ unsigned int instance = quad * 2 + 1;
+ intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
+ _BCS(instance));
+
+ if (mask & info->engine_mask) {
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
+ drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
+
+ info->engine_mask &= ~mask;
+ }
+ }
+}
+
+/*
+ * Determine which engines are fused off in our particular hardware.
+ * Note that we have a catch-22 situation where we need to be able to access
+ * the blitter forcewake domain to read the engine fuses, but at the same time
+ * we need to know which engines are available on the system to know which
+ * forcewake domains are present. We solve this by initializing the forcewake
+ * domains based on the full engine mask in the platform capabilities before
+ * calling this function and pruning the domains for fused-off engines
+ * afterwards.
+ */
+static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+{
+ struct intel_gt_info *info = &gt->info;
+
+ GEM_BUG_ON(!info->engine_mask);
+
+ engine_mask_apply_media_fuses(gt);
+ engine_mask_apply_compute_fuses(gt);
+ engine_mask_apply_copy_fuses(gt);
+
+ return info->engine_mask;
+}
+
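+ /*
+ * Walk map[] in order and hand out increasing logical ids to the
+ * engine instances of @class that are present on @gt.
+ */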
+static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
+ u8 class, const u8 *map, u8 num_instances)
+{
+ int i, j;
+ u8 current_logical_id = 0;
+
+ for (j = 0; j < num_instances; ++j) {
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ if (!HAS_ENGINE(gt, i) ||
+ intel_engines[i].class != class)
+ continue;
+
+ if (intel_engines[i].instance == map[j]) {
+ logical_ids[intel_engines[i].instance] =
+ current_logical_id++;
+ break;
+ }
+ }
+ }
+}
+
+static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
+{
+ /*
+ * A logical to physical mapping is needed for proper support
+ * of the split-frame feature.
+ */
+ if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
+ const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
+
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ } else {
+ int i;
+ u8 map[MAX_ENGINE_INSTANCE + 1];
+
+ for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+ map[i] = i;
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ }
+}
+
+/**
+ * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
+ * @gt: pointer to struct intel_gt
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init_mmio(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const unsigned int engine_mask = init_engine_mask(gt);
+ unsigned int mask = 0;
+ unsigned int i, class;
+ u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
+ int err;
+
+ drm_WARN_ON(&i915->drm, engine_mask == 0);
+ drm_WARN_ON(&i915->drm, engine_mask &
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+
+ if (i915_inject_probe_failure(i915))
+ return -ENODEV;
+
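+ /*
+ * Set the engines up class by class so that each class gets a
+ * consistent set of logical instance ids.
+ */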
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ setup_logical_ids(gt, logical_ids, class);
+
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ u8 instance = intel_engines[i].instance;
+
+ if (intel_engines[i].class != class ||
+ !HAS_ENGINE(gt, i))
+ continue;
+
+ err = intel_engine_setup(gt, i,
+ logical_ids[instance]);
+ if (err)
+ goto cleanup;
+
+ mask |= BIT(i);
+ }
+ }
+
+ /*
+ * Catch failures to update the intel_engines table when new engines
+ * are added to the driver: warn and disable the forgotten engines.
+ */
+ if (drm_WARN_ON(&i915->drm, mask != engine_mask))
+ gt->info.engine_mask = mask;
+
+ gt->info.num_engines = hweight32(mask);
+
+ intel_gt_check_and_clear_faults(gt);
+
+ intel_setup_engine_capabilities(gt);
+
+ intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
+
+ return 0;
+
+cleanup:
+ intel_engines_free(gt);
+ return err;
+}
+
+void intel_engine_init_execlists(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ execlists->port_mask = 1;
+ GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
+ GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+
+ memset(execlists->pending, 0, sizeof(execlists->pending));
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+}
+
+static void cleanup_status_page(struct intel_engine_cs *engine)
+{
+ struct i915_vma *vma;
+
+ /* Prevent writes into HWSP after returning the page to the system */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+
+ vma = fetch_and_zero(&engine->status_page.vma);
+ if (!vma)
+ return;
+
+ if (!HWS_NEEDS_PHYSICAL(engine->i915))
+ i915_vma_unpin(vma);
+
+ i915_gem_object_unpin_map(vma->obj);
+ i915_gem_object_put(vma->obj);
+}
+
+static int pin_ggtt_status_page(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ struct i915_vma *vma)
+{
+ unsigned int flags;
+
+ if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
+ /*
+ * On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags = PIN_MAPPABLE;
+ else
+ flags = PIN_HIGH;
+
+ return i915_ggtt_pin(vma, ww, 0, flags);
+}
+
+static int init_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ void *vaddr;
+ int ret;
+
+ INIT_LIST_HEAD(&engine->status_page.timelines);
+
+ /*
+ * Though the HWS register does support 36bit addresses, historically
+ * we have had hangs and corruption reported due to wild writes if
+ * the HWS is placed above 4G. We only allow objects to be allocated
+ * in GFP_DMA32 for i965, and no earlier physical address users had
+ * access to more than 4G.
+ */
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ drm_err(&engine->i915->drm,
+ "Failed to allocate status page\n");
+ return PTR_ERR(obj);
+ }
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_put;
+ }
+
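+ /* Standard ww transaction: on -EDEADLK, back off and retry the lock. */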
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
+ ret = pin_ggtt_status_page(engine, &ww, vma);
+ if (ret)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
+ engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
+ engine->status_page.vma = vma;
+
+err_unpin:
+ if (ret)
+ i915_vma_unpin(vma);
+err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+err_put:
+ if (ret)
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static int engine_setup_common(struct intel_engine_cs *engine)
+{
+ int err;
+
+ init_llist_head(&engine->barrier_tasks);
+
+ err = init_status_page(engine);
+ if (err)
+ return err;
+
+ engine->breadcrumbs = intel_breadcrumbs_create(engine);
+ if (!engine->breadcrumbs) {
+ err = -ENOMEM;
+ goto err_status;
+ }
+
+ engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
+ if (!engine->sched_engine) {
+ err = -ENOMEM;
+ goto err_sched_engine;
+ }
+ engine->sched_engine->private_data = engine;
+
+ err = intel_engine_init_cmd_parser(engine);
+ if (err)
+ goto err_cmd_parser;
+
+ intel_engine_init_execlists(engine);
+ intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
+
+ /* Use the whole device by default */
+ engine->sseu =
+ intel_sseu_from_device_info(&engine->gt->info.sseu);
+
+ intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_ctx_wa(engine);
+
+ if (GRAPHICS_VER(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
+ return 0;
+
+err_cmd_parser:
+ i915_sched_engine_put(engine->sched_engine);
+err_sched_engine:
+ intel_breadcrumbs_put(engine->breadcrumbs);
+err_status:
+ cleanup_status_page(engine);
+ return err;
+}
+
+struct measure_breadcrumb {
+ struct i915_request rq;
+ struct intel_ring ring;
+ u32 cs[2048];
+};
+
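+ /*
+ * Emit a fini breadcrumb into a scratch frame to measure how many dwords
+ * it occupies; the result is stored as engine->emit_fini_breadcrumb_dw so
+ * that enough ring space can be set aside for it on each request.
+ */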
+static int measure_breadcrumb_dw(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct measure_breadcrumb *frame;
+ int dw;
+
+ GEM_BUG_ON(!engine->gt->scratch);
+
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame)
+ return -ENOMEM;
+
+ frame->rq.engine = engine;
+ frame->rq.context = ce;
+ rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+ frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
+
+ frame->ring.vaddr = frame->cs;
+ frame->ring.size = sizeof(frame->cs);
+ frame->ring.wrap =
+ BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
+ frame->ring.effective_size = frame->ring.size;
+ intel_ring_update_space(&frame->ring);
+ frame->rq.ring = &frame->ring;
+
+ mutex_lock(&ce->timeline->mutex);
+ spin_lock_irq(&engine->sched_engine->lock);
+
+ dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
+ spin_unlock_irq(&engine->sched_engine->lock);
+ mutex_unlock(&ce->timeline->mutex);
+
+ GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
+
+ kfree(frame);
+ return dw;
+}
+
+struct intel_context *
+intel_engine_create_pinned_context(struct intel_engine_cs *engine,
+ struct i915_address_space *vm,
+ unsigned int ring_size,
+ unsigned int hwsp,
+ struct lock_class_key *key,
+ const char *name)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
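+ /*
+ * Stash the requested hwsp offset in the low bits of the not-yet
+ * allocated timeline pointer; the backend picks it up when the
+ * context is first pinned.
+ */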
+ ce->timeline = page_pack_bits(NULL, hwsp);
+ ce->ring = NULL;
+ ce->ring_size = ring_size;
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+
+ err = intel_context_pin(ce); /* perma-pin so it is always available */
+ if (err) {
+ intel_context_put(ce);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
+
+ /*
+ * Give our perma-pinned kernel timelines a separate lockdep class,
+ * so that we can use them from within the normal user timelines
+ * should we need to inject GPU operations during their request
+ * construction.
+ */
+ lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
+
+ return ce;
+}
+
+void intel_engine_destroy_pinned_context(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *hwsp = engine->status_page.vma;
+
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
+
+ mutex_lock(&hwsp->vm->mutex);
+ list_del(&ce->timeline->engine_link);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ list_del(&ce->pinned_contexts_link);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+}
+
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+
+ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_SEQNO_ADDR,
+ &kernel, "kernel_context");
+}
+
+/**
+ * engine_init_common - initialize engine state which might require hw access
+ * @engine: Engine to initialize.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do require hardware access.
+ *
+ * Typically done at later stages of submission mode specific engine setup.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+static int engine_init_common(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int ret;
+
+ engine->set_default_submission(engine);
+
+ /*
+ * We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ret = measure_breadcrumb_dw(ce);
+ if (ret < 0)
+ goto err_context;
+
+ engine->emit_fini_breadcrumb_dw = ret;
+ engine->kernel_context = ce;
+
+ return 0;
+
+err_context:
+ intel_engine_destroy_pinned_context(ce);
+ return ret;
+}
+
+int intel_engines_init(struct intel_gt *gt)
+{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc)) {
+ gt->submission_method = INTEL_SUBMISSION_GUC;
+ setup = intel_guc_submission_setup;
+ } else if (HAS_EXECLISTS(gt->i915)) {
+ gt->submission_method = INTEL_SUBMISSION_ELSP;
+ setup = intel_execlists_submission_setup;
+ } else {
+ gt->submission_method = INTEL_SUBMISSION_RING;
+ setup = intel_ring_submission_setup;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = engine_setup_common(engine);
+ if (err)
+ return err;
+
+ err = setup(engine);
+ if (err) {
+ intel_engine_cleanup_common(engine);
+ return err;
+ }
+
+ /* The backend should now be responsible for cleanup */
+ GEM_BUG_ON(engine->release == NULL);
+
+ err = engine_init_common(engine);
+ if (err)
+ return err;
+
+ intel_engine_add_user(engine);
+ }
+
+ return 0;
+}
+
+/**
+ * intel_engine_cleanup_common - cleans up the engine state created by
+ * the common initializers.
+ * @engine: Engine to cleanup.
+ *
+ * This cleans up everything created by the common helpers.
+ */
+void intel_engine_cleanup_common(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
+
+ i915_sched_engine_put(engine->sched_engine);
+ intel_breadcrumbs_put(engine->breadcrumbs);
+
+ intel_engine_fini_retire(engine);
+ intel_engine_cleanup_cmd_parser(engine);
+
+ if (engine->default_state)
+ fput(engine->default_state);
+
+ if (engine->kernel_context)
+ intel_engine_destroy_pinned_context(engine->kernel_context);
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+ cleanup_status_page(engine);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
+}
+
+/**
+ * intel_engine_resume - re-initializes the HW state of the engine
+ * @engine: Engine to resume.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_resume(struct intel_engine_cs *engine)
+{
+ intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
+
+ return engine->resume(engine);
+}
+
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ u64 acthd;
+
+ if (GRAPHICS_VER(i915) >= 8)
+ acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+ else if (GRAPHICS_VER(i915) >= 4)
+ acthd = ENGINE_READ(engine, RING_ACTHD);
+ else
+ acthd = ENGINE_READ(engine, ACTHD);
+
+ return acthd;
+}
+
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
+{
+ u64 bbaddr;
+
+ if (GRAPHICS_VER(engine->i915) >= 8)
+ bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
+ else
+ bbaddr = ENGINE_READ(engine, RING_BBADDR);
+
+ return bbaddr;
+}
+
+static unsigned long stop_timeout(const struct intel_engine_cs *engine)
+{
+ if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
+ return 0;
+
+ /*
+ * If we are doing a normal GPU reset, we can take our time and allow
+ * the engine to quiesce. We've stopped submission to the engine, and
+ * if we wait long enough an innocent context should complete and
+ * leave the engine idle. So they should not be caught unaware by
+ * the forthcoming GPU reset (which usually follows the stop_cs)!
+ */
+ return READ_ONCE(engine->props.stop_timeout_ms);
+}
+
+static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
+ int err;
+
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ /*
+ * Wa_22011802037: gen11, gen12: prior to doing a reset, ensure the CS is
+ * stopped; set the ring stop bit and the prefetch disable bit to halt the CS
+ */
+ if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
+ _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+
+ /* A final mmio read to let GPU writes be hopefully flushed to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+}
+
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+ int err = 0;
+
+ if (GRAPHICS_VER(engine->i915) < 3)
+ return -ENODEV;
+
+ ENGINE_TRACE(engine, "\n");
+ /*
+ * TODO: Find out why occasionally stopping the CS times out. Seen
+ * especially with gem_eio tests.
+ *
+ * Occasionally trying to stop the cs times out, but does not adversely
+ * affect functionality. The timeout is set as a config parameter that
+ * defaults to 100ms. In most cases the follow up operation is to wait
+ * for pending MI_FORCE_WAKES. The assumption is that this timeout is
+ * sufficient for any pending MI_FORCEWAKEs to complete. Until this is
+ * root caused, the caller must check and handle the return from this
+ * function.
+ */
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
+ ENGINE_TRACE(engine,
+ "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
+ ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
+
+ /*
+ * Sometimes we observe that the idle flag is not
+ * set even though the ring is empty. So double
+ * check before giving up.
+ */
+ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
+ err = -ETIMEDOUT;
+ }
+
+ return err;
+}
+
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
+{
+ ENGINE_TRACE(engine, "\n");
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+}
+
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+ static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+ [RCS0] = MSG_IDLE_CS,
+ [BCS0] = MSG_IDLE_BCS,
+ [VCS0] = MSG_IDLE_VCS0,
+ [VCS1] = MSG_IDLE_VCS1,
+ [VCS2] = MSG_IDLE_VCS2,
+ [VCS3] = MSG_IDLE_VCS3,
+ [VCS4] = MSG_IDLE_VCS4,
+ [VCS5] = MSG_IDLE_VCS5,
+ [VCS6] = MSG_IDLE_VCS6,
+ [VCS7] = MSG_IDLE_VCS7,
+ [VECS0] = MSG_IDLE_VECS0,
+ [VECS1] = MSG_IDLE_VECS1,
+ [VECS2] = MSG_IDLE_VECS2,
+ [VECS3] = MSG_IDLE_VECS3,
+ [CCS0] = MSG_IDLE_CS,
+ [CCS1] = MSG_IDLE_CS,
+ [CCS2] = MSG_IDLE_CS,
+ [CCS3] = MSG_IDLE_CS,
+ };
+ u32 val;
+
+ if (!_reg[engine->id].reg) {
+ drm_err(&engine->i915->drm,
+ "MSG IDLE undefined for engine id %u\n", engine->id);
+ return 0;
+ }
+
+ val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+ /* bits[29:25] & bits[13:9] >> shift */
+ return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
+
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
+{
+ int ret;
+
+ /* Ensure GPM receives fw up/down after CS is stopped */
+ udelay(1);
+
+ /* Wait for forcewake request to complete in GPM */
+ ret = __intel_wait_for_register_fw(gt->uncore,
+ GEN9_PWRGT_DOMAIN_STATUS,
+ fw_mask, fw_mask, 5000, 0, NULL);
+
+ /* Ensure CS receives fw ack from GPM */
+ udelay(1);
+
+ if (ret)
+ GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
+{
+ u32 fw_pending = __cs_pending_mi_force_wakes(engine);
+
+ if (fw_pending)
+ __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+}
+
+/* NB: please notice the memset */
+void intel_engine_get_instdone(const struct intel_engine_cs *engine,
+ struct intel_instdone *instdone)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 mmio_base = engine->mmio_base;
+ int slice;
+ int subslice;
+ int iter;
+
+ memset(instdone, 0, sizeof(*instdone));
+
+ if (GRAPHICS_VER(i915) >= 8) {
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS0)
+ return;
+
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ if (GRAPHICS_VER(i915) >= 12) {
+ instdone->slice_common_extra[0] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
+ instdone->slice_common_extra[1] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
+ }
+
+ for_each_ss_steering(iter, engine->gt, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ intel_gt_mcr_read(engine->gt,
+ GEN7_SAMPLER_INSTDONE,
+ slice, subslice);
+ instdone->row[slice][subslice] =
+ intel_gt_mcr_read(engine->gt,
+ GEN7_ROW_INSTDONE,
+ slice, subslice);
+ }
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ for_each_ss_steering(iter, engine->gt, slice, subslice)
+ instdone->geom_svg[slice][subslice] =
+ intel_gt_mcr_read(engine->gt,
+ XEHPG_INSTDONE_GEOM_SVG,
+ slice, subslice);
+ }
+ } else if (GRAPHICS_VER(i915) >= 7) {
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+
+ if (engine->id != RCS0)
+ return;
+
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] =
+ intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] =
+ intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
+ } else if (GRAPHICS_VER(i915) >= 4) {
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+ if (engine->id == RCS0)
+ /* HACK: Using the wrong struct member */
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN4_INSTDONE1);
+ } else {
+ instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
+ }
+}
+
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+ bool idle = true;
+
+ if (I915_SELFTEST_ONLY(!engine->mmio_base))
+ return true;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return true;
+
+ /* First check that no commands are left in the ring */
+ if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
+ idle = false;
+
+ /* No bit for gen2, so assume the CS parser is idle */
+ if (GRAPHICS_VER(engine->i915) > 2 &&
+ !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
+ idle = false;
+
+ intel_engine_pm_put(engine);
+
+ return idle;
+}
+
+void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
+{
+ struct tasklet_struct *t = &engine->sched_engine->tasklet;
+
+ if (!t->callback)
+ return;
+
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->callback(t);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
+}
+
+/**
+ * intel_engine_is_idle() - Report if the engine has finished processing all work
+ * @engine: the intel_engine_cs
+ *
+ * Return true if there are no requests pending, nothing left to be submitted
+ * to hardware, and the engine is idle.
+ */
+bool intel_engine_is_idle(struct intel_engine_cs *engine)
+{
+ /* More white lies: if wedged, the hw state is inconsistent */
+ if (intel_gt_is_wedged(engine->gt))
+ return true;
+
+ if (!intel_engine_pm_is_awake(engine))
+ return true;
+
+ /* Waiting to drain ELSP? */
+ intel_synchronize_hardirq(engine->i915);
+ intel_engine_flush_submission(engine);
+
+ /* ELSP is empty, but there are ready requests? E.g. after reset */
+ if (!i915_sched_engine_is_empty(engine->sched_engine))
+ return false;
+
+ /* Ring stopped? */
+ return ring_is_idle(engine);
+}
+
+bool intel_engines_are_idle(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * If the driver is wedged, HW state may be very inconsistent and
+ * report that it is still busy, even though we have stopped using it.
+ */
+ if (intel_gt_is_wedged(gt))
+ return true;
+
+ /* Already parked (and passed an idleness test); must still be idle */
+ if (!READ_ONCE(gt->awake))
+ return true;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_is_idle(engine))
+ return false;
+ }
+
+ return true;
+}
+
+bool intel_engine_irq_enable(struct intel_engine_cs *engine)
+{
+ if (!engine->irq_enable)
+ return false;
+
+ /* Caller disables interrupts */
+ spin_lock(engine->gt->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(engine->gt->irq_lock);
+
+ return true;
+}
+
+void intel_engine_irq_disable(struct intel_engine_cs *engine)
+{
+ if (!engine->irq_disable)
+ return;
+
+ /* Caller disables interrupts */
+ spin_lock(engine->gt->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(engine->gt->irq_lock);
+}
+
+void intel_engines_reset_default_submission(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->sanitize)
+ engine->sanitize(engine);
+
+ engine->set_default_submission(engine);
+ }
+}
+
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
+{
+ switch (GRAPHICS_VER(engine->i915)) {
+ case 2:
+ return false; /* uses physical not virtual addresses */
+ case 3:
+ /* maybe only uses physical not virtual addresses */
+ return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 4:
+ return !IS_I965G(engine->i915); /* who knows! */
+ case 6:
+ return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
+ default:
+ return true;
+ }
+}
+
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->sched_engine->lock here, there
+ * is no control over the submission queue per-se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+ int len = 0;
+
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
+ len = scnprintf(buf, sz,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
+
+ return len;
+}
+
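+ /* Hex dump a buffer, collapsing runs of identical rows into a single "*". */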
+static void hexdump(struct drm_printer *m, const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ drm_printf(m, "*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ drm_printf(m, "[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+static const char *repr_timer(const struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return "inactive";
+
+ if (timer_pending(t))
+ return "active";
+
+ return "expired";
+}
+
+static void intel_engine_print_registers(struct intel_engine_cs *engine,
+ struct drm_printer *m)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 addr;
+
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+ if (HAS_EXECLISTS(dev_priv)) {
+ drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+ drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+ }
+ drm_printf(m, "\tRING_START: 0x%08x\n",
+ ENGINE_READ(engine, RING_START));
+ drm_printf(m, "\tRING_HEAD: 0x%08x\n",
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
+ drm_printf(m, "\tRING_TAIL: 0x%08x\n",
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
+ drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ if (GRAPHICS_VER(engine->i915) > 2) {
+ drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
+ ENGINE_READ(engine, RING_MI_MODE),
+ ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
+ }
+
+ if (GRAPHICS_VER(dev_priv) >= 6) {
+ drm_printf(m, "\tRING_IMR: 0x%08x\n",
+ ENGINE_READ(engine, RING_IMR));
+ drm_printf(m, "\tRING_ESR: 0x%08x\n",
+ ENGINE_READ(engine, RING_ESR));
+ drm_printf(m, "\tRING_EMR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EMR));
+ drm_printf(m, "\tRING_EIR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EIR));
+ }
+
+ addr = intel_engine_get_active_head(engine);
+ drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (GRAPHICS_VER(dev_priv) >= 8)
+ addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
+ else if (GRAPHICS_VER(dev_priv) >= 4)
+ addr = ENGINE_READ(engine, RING_DMA_FADD);
+ else
+ addr = ENGINE_READ(engine, DMA_FADD_I8XX);
+ drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (GRAPHICS_VER(dev_priv) >= 4) {
+ drm_printf(m, "\tIPEIR: 0x%08x\n",
+ ENGINE_READ(engine, RING_IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n",
+ ENGINE_READ(engine, RING_IPEHR));
+ } else {
+ drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
+ }
+
+ if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
+ struct i915_request * const *port, *rq;
+ const u32 *hws =
+ &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+ const u8 num_entries = execlists->csb_size;
+ unsigned int idx;
+ u8 read, write;
+
+ drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
+ str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
+ str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
+ repr_timer(&engine->execlists.preempt),
+ repr_timer(&engine->execlists.timer));
+
+ read = execlists->csb_head;
+ write = READ_ONCE(*execlists->csb_write);
+
+ drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ read, write, num_entries);
+
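+ /* Unwrap the CSB pointers so the loop below walks read..write in order. */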
+ if (read >= num_entries)
+ read = 0;
+ if (write >= num_entries)
+ write = 0;
+ if (read > write)
+ write += num_entries;
+ while (read < write) {
+ idx = ++read % num_entries;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx, hws[idx * 2], hws[idx * 2 + 1]);
+ }
+
+ i915_sched_engine_active_lock_bh(engine->sched_engine);
+ rcu_read_lock();
+ for (port = execlists->active; (rq = *port); port++) {
+ char hdr[160];
+ int len;
+
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ccid:%08x%s%s, ",
+ (int)(port - execlists->active),
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ i915_request_show(m, rq, hdr, 0);
+ }
+ for (port = execlists->pending; (rq = *port); port++) {
+ char hdr[160];
+ int len;
+
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d]: ccid:%08x%s%s, ",
+ (int)(port - execlists->pending),
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ i915_request_show(m, rq, hdr, 0);
+ }
+ rcu_read_unlock();
+ i915_sched_engine_active_unlock_bh(engine->sched_engine);
+ } else if (GRAPHICS_VER(dev_priv) > 6) {
+ drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ ENGINE_READ(engine, RING_PP_DIR_BASE));
+ drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
+ drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ ENGINE_READ(engine, RING_PP_DIR_DCLV));
+ }
+}
+
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+ struct i915_vma_resource *vma_res = rq->batch_res;
+ void *ring;
+ int size;
+
+ drm_printf(m,
+ "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+ rq->head, rq->postfix, rq->tail,
+ vma_res ? upper_32_bits(vma_res->start) : ~0u,
+ vma_res ? lower_32_bits(vma_res->start) : ~0u);
+
+ size = rq->tail - rq->head;
+ if (rq->tail < rq->head)
+ size += rq->ring->size;
+
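+ /* Copy the request payload out of the ring, handling wraparound, for the hexdump. */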
+ ring = kmalloc(size, GFP_ATOMIC);
+ if (ring) {
+ const void *vaddr = rq->ring->vaddr;
+ unsigned int head = rq->head;
+ unsigned int len = 0;
+
+ if (rq->tail < head) {
+ len = rq->ring->size - head;
+ memcpy(ring, vaddr + head, len);
+ head = 0;
+ }
+ memcpy(ring + len, vaddr + head, size - len);
+
+ hexdump(m, ring, size);
+ kfree(ring);
+ }
+}
+
+static unsigned long read_ul(void *p, size_t x)
+{
+ return *(unsigned long *)(p + x);
+}
+
+static void print_properties(struct intel_engine_cs *engine,
+ struct drm_printer *m)
+{
+ static const struct pmap {
+ size_t offset;
+ const char *name;
+ } props[] = {
+#define P(x) { \
+ .offset = offsetof(typeof(engine->props), x), \
+ .name = #x \
+}
+ P(heartbeat_interval_ms),
+ P(max_busywait_duration_ns),
+ P(preempt_timeout_ms),
+ P(stop_timeout_ms),
+ P(timeslice_duration_ms),
+
+ {},
+#undef P
+ };
+ const struct pmap *p;
+
+ drm_printf(m, "\tProperties:\n");
+ for (p = props; p->name; p++)
+ drm_printf(m, "\t\t%s: %lu [default %lu]\n",
+ p->name,
+ read_ul(&engine->props, p->offset),
+ read_ul(&engine->defaults, p->offset));
+}
+
+static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
+{
+ struct intel_timeline *tl = get_timeline(rq);
+
+ i915_request_show(m, rq, msg, 0);
+
+ drm_printf(m, "\t\tring->start: 0x%08x\n",
+ i915_ggtt_offset(rq->ring->vma));
+ drm_printf(m, "\t\tring->head: 0x%08x\n",
+ rq->ring->head);
+ drm_printf(m, "\t\tring->tail: 0x%08x\n",
+ rq->ring->tail);
+ drm_printf(m, "\t\tring->emit: 0x%08x\n",
+ rq->ring->emit);
+ drm_printf(m, "\t\tring->space: 0x%08x\n",
+ rq->ring->space);
+
+ if (tl) {
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ tl->hwsp_offset);
+ intel_timeline_put(tl);
+ }
+
+ print_request_ring(m, rq);
+
+ if (rq->context->lrc_reg_state) {
+ drm_printf(m, "Logical Ring Context:\n");
+ hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
+ }
+}
+
+void intel_engine_dump_active_requests(struct list_head *requests,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ struct i915_request *rq;
+ const char *msg;
+ enum i915_request_state state;
+
+ list_for_each_entry(rq, requests, sched.link) {
+ if (rq == hung_rq)
+ continue;
+
+ state = i915_test_request_state(rq);
+ if (state < I915_REQUEST_QUEUED)
+ continue;
+
+ if (state == I915_REQUEST_ACTIVE)
+ msg = "\t\tactive on engine";
+ else
+ msg = "\t\tactive in queue";
+
+ engine_dump_request(rq, m, msg);
+ }
+}
+
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m)
+{
+ struct intel_context *hung_ce = NULL;
+ struct i915_request *hung_rq = NULL;
+
+ /*
+ * No need for an engine->irq_seqno_barrier() before the seqno reads.
+ * The GPU is still running so requests are still executing and any
+ * hardware reads will be out of date by the time they are reported.
+ * But the intention here is just to report an instantaneous snapshot
+ * so that's fine.
+ */
+ intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
+
+ drm_printf(m, "\tRequests:\n");
+
+ if (hung_rq)
+ engine_dump_request(hung_rq, m, "\t\thung");
+ else if (hung_ce)
+ drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
+
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ intel_guc_dump_active_requests(engine, hung_rq, m);
+ else
+ intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+ if (hung_rq)
+ i915_request_put(hung_rq);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
+{
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ ktime_t dummy;
+
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (intel_gt_is_wedged(engine->gt))
+ drm_printf(m, "*** WEDGED ***\n");
+
+ drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+ drm_printf(m, "\tBarriers?: %s\n",
+ str_yes_no(!llist_empty(&engine->barrier_tasks)));
+ drm_printf(m, "\tLatency: %luus\n",
+ ewma__engine_latency_read(&engine->latency));
+ if (intel_engine_supports_stats(engine))
+ drm_printf(m, "\tRuntime: %llums\n",
+ ktime_to_ms(intel_engine_get_busy_time(engine,
+ &dummy)));
+ drm_printf(m, "\tForcewake: %x domains, %d active\n",
+ engine->fw_domain, READ_ONCE(engine->fw_active));
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ drm_printf(m, "\tHeartbeat: %d ms ago\n",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ rcu_read_unlock();
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
+ print_properties(engine, m);
+
+ engine_dump_active_requests(engine, m);
+
+ drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
+ if (wakeref) {
+ intel_engine_print_registers(engine, m);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
+ } else {
+ drm_printf(m, "\tDevice is asleep; skipping register dump\n");
+ }
+
+ intel_execlists_show_requests(engine, m, i915_request_show, 8);
+
+ drm_printf(m, "HWSP:\n");
+ hexdump(m, engine->status_page.addr, PAGE_SIZE);
+
+ drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
+
+ intel_engine_print_breadcrumbs(engine, m);
+}
+
+/**
+ * intel_engine_get_busy_time() - Return current accumulated engine busyness
+ * @engine: engine to report on
+ * @now: monotonic timestamp of sampling
+ *
+ * Returns accumulated time @engine was busy since engine stats were enabled.
+ */
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
+{
+ return engine->busyness(engine, now);
+}
+
+struct intel_context *
+intel_engine_create_virtual(struct intel_engine_cs **siblings,
+ unsigned int count, unsigned long flags)
+{
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1 && !(flags & FORCE_VIRTUAL))
+ return intel_context_create(siblings[0]);
+
+ GEM_BUG_ON(!siblings[0]->cops->create_virtual);
+ return siblings[0]->cops->create_virtual(siblings, count, flags);
+}
+
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *request, *active = NULL;
+
+ /*
+ * This search does not work in GuC submission mode. However, the GuC
+ * will report the hanging context directly to the driver itself. So
+ * the driver should never get here when in GuC mode.
+ */
+ GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
+
+ /*
+	 * We are called by the error capture, by reset and to dump engine
+	 * state at random points in time. In particular, note that none of
+	 * these is crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ * At all other times, we must assume the GPU is still running, but
+ * we only care about the snapshot of this moment.
+ */
+ lockdep_assert_held(&engine->sched_engine->lock);
+
+ rcu_read_lock();
+ request = execlists_active(&engine->execlists);
+ if (request) {
+ struct intel_timeline *tl = request->context->timeline;
+
+ list_for_each_entry_from_reverse(request, &tl->requests, link) {
+ if (__i915_request_is_complete(request))
+ break;
+
+ active = request;
+ }
+ }
+ rcu_read_unlock();
+ if (active)
+ return active;
+
+ list_for_each_entry(request, &engine->sched_engine->requests,
+ sched.link) {
+ if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
+ continue;
+
+ active = request;
+ break;
+ }
+
+ return active;
+}
+
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq)
+{
+ unsigned long flags;
+
+ *ce = intel_engine_get_hung_context(engine);
+ if (*ce) {
+ intel_engine_clear_hung_context(engine);
+
+ *rq = intel_context_get_active_request(*ce);
+ return;
+ }
+
+ /*
+ * Getting here with GuC enabled means it is a forced error capture
+ * with no actual hang. So, no need to attempt the execlist search.
+ */
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ return;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ *rq = engine_execlist_find_hung_request(engine);
+ if (*rq)
+ *rq = i915_request_get_rcu(*rq);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
+{
+ /*
+ * If there are any non-fused-off CCS engines, we need to enable CCS
+ * support in the RCU_MODE register. This only needs to be done once,
+ * so for simplicity we'll take care of this in the RCS engine's
+ * resume handler; since the RCS and all CCS engines belong to the
+ * same reset domain and are reset together, this will also take care
+ * of re-applying the setting after i915-triggered resets.
+ */
+ if (!CCS_MASK(engine->gt))
+ return;
+
+ intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
+ _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "mock_engine.c"
+#include "selftest_engine.c"
+#include "selftest_engine_cs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000..a3698f611
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that this restores progress.
+ */
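For orientation, here is a hedged sketch of how a caller might use the interface implemented later in this file to retune the pulse period; the wrapper name and the 2000 ms value are illustrative assumptions.

/* Illustrative only: widen the heartbeat period to 2 seconds. */
static int example_relax_heartbeat(struct intel_engine_cs *engine)
{
	/*
	 * Passing 0 instead would park the heartbeat entirely, which
	 * intel_engine_set_heartbeat() only allows when the engine
	 * supports preempt-to-reset.
	 */
	return intel_engine_set_heartbeat(engine, 2000);
}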
+
+static bool next_heartbeat(struct intel_engine_cs *engine)
+{
+ long delay;
+
+ delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+ if (!delay)
+ return false;
+
+ delay = msecs_to_jiffies_timeout(delay);
+ if (delay >= HZ)
+ delay = round_jiffies_up_relative(delay);
+ mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
+
+ return true;
+}
+
+static struct i915_request *
+heartbeat_create(struct intel_context *ce, gfp_t gfp)
+{
+ struct i915_request *rq;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, gfp);
+ intel_context_exit(ce);
+
+ return rq;
+}
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+ i915_request_add_active_barriers(rq);
+ if (!engine->heartbeat.systole && intel_engine_has_heartbeat(engine))
+ engine->heartbeat.systole = i915_request_get(rq);
+}
+
+static void heartbeat_commit(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ idle_pulse(rq->engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, attr);
+}
+
+static void show_heartbeat(const struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct drm_printer p = drm_debug_printer("heartbeat");
+
+ if (!rq) {
+ intel_engine_dump(engine, &p,
+ "%s heartbeat not ticking\n",
+ engine->name);
+ } else {
+ intel_engine_dump(engine, &p,
+ "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ rq->sched.attr.priority);
+ }
+}
+
+static void
+reset_engine(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ show_heartbeat(rq, engine);
+
+ if (intel_engine_uses_guc(engine))
+ /*
+ * GuC itself is toast or GuC's hang detection
+ * is disabled. Either way, need to find the
+ * hang culprit manually.
+ */
+ intel_guc_find_hung_context(engine);
+
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "stopped heartbeat on %s",
+ engine->name);
+}
+
+static void heartbeat(struct work_struct *wrk)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
+ struct intel_engine_cs *engine =
+ container_of(wrk, typeof(*engine), heartbeat.work.work);
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ unsigned long serial;
+
+ /* Just in case everything has gone horribly wrong, give it a kick */
+ intel_engine_flush_submission(engine);
+
+ rq = engine->heartbeat.systole;
+ if (rq && i915_request_completed(rq)) {
+ i915_request_put(rq);
+ engine->heartbeat.systole = NULL;
+ }
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
+ if (intel_gt_is_wedged(engine->gt))
+ goto out;
+
+ if (i915_sched_engine_disabled(engine->sched_engine)) {
+ reset_engine(engine, engine->heartbeat.systole);
+ goto out;
+ }
+
+ if (engine->heartbeat.systole) {
+ long delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+
+ /* Safeguard against too-fast worker invocations */
+ if (!time_after(jiffies,
+ rq->emitted_jiffies + msecs_to_jiffies(delay)))
+ goto out;
+
+ if (!i915_sw_fence_signaled(&rq->submit)) {
+ /*
+ * Not yet submitted, system is stalled.
+ *
+ * This more often happens for ring submission,
+ * where all contexts are funnelled into a common
+ * ringbuffer. If one context is blocked on an
+ * external fence, not only is it not submitted,
+ * but all other contexts, including the kernel
+ * context are stuck waiting for the signal.
+ */
+ } else if (engine->sched_engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ /*
+ * Gradually raise the priority of the heartbeat to
+ * give high priority work [which presumably desires
+ * low latency and no jitter] the chance to naturally
+ * complete before being preempted.
+ */
+ attr.priority = 0;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_HEARTBEAT;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_BARRIER;
+
+ local_bh_disable();
+ engine->sched_engine->schedule(rq, &attr);
+ local_bh_enable();
+ } else {
+ reset_engine(engine, rq);
+ }
+
+ rq->emitted_jiffies = jiffies;
+ goto out;
+ }
+
+ serial = READ_ONCE(engine->serial);
+ if (engine->wakeref_serial == serial)
+ goto out;
+
+ if (!mutex_trylock(&ce->timeline->mutex)) {
+ /* Unable to lock the kernel timeline, is the engine stuck? */
+ if (xchg(&engine->heartbeat.blocked, serial) == serial)
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "no heartbeat on %s",
+ engine->name);
+ goto out;
+ }
+
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ if (IS_ERR(rq))
+ goto unlock;
+
+ heartbeat_commit(rq, &attr);
+
+unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out:
+ if (!engine->i915->params.enable_hangcheck || !next_heartbeat(engine))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ intel_engine_pm_put(engine);
+}
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return;
+
+ next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_gt_unpark_heartbeats(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ if (intel_engine_pm_is_awake(engine))
+ intel_engine_unpark_heartbeat(engine);
+}
+
+void intel_gt_park_heartbeats(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_park_heartbeat(engine);
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
+static int __intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ lockdep_assert_held(&ce->timeline->mutex);
+ GEM_BUG_ON(!intel_engine_has_preemption(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+
+ heartbeat_commit(rq, &attr);
+ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+
+ return 0;
+}
+
+static unsigned long set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ unsigned long old;
+
+ old = xchg(&engine->props.heartbeat_interval_ms, delay);
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+
+ return old;
+}
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ struct intel_context *ce = engine->kernel_context;
+ int err = 0;
+
+ if (!delay && !intel_engine_has_preempt_reset(engine))
+ return -ENODEV;
+
+ intel_engine_pm_get(engine);
+
+ err = mutex_lock_interruptible(&ce->timeline->mutex);
+ if (err)
+ goto out_rpm;
+
+ if (delay != engine->props.heartbeat_interval_ms) {
+ unsigned long saved = set_heartbeat(engine, delay);
+
+ /* recheck current execution */
+ if (intel_engine_has_preemption(engine)) {
+ err = __intel_engine_pulse(engine);
+ if (err)
+ set_heartbeat(engine, saved);
+ }
+ }
+
+ mutex_unlock(&ce->timeline->mutex);
+
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = engine->kernel_context;
+ int err;
+
+ if (!intel_engine_has_preemption(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ err = -EINTR;
+ if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = __intel_engine_pulse(engine);
+ mutex_unlock(&ce->timeline->mutex);
+ }
+
+ intel_engine_flush_submission(engine);
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ int err;
+
+ if (llist_empty(&engine->barrier_tasks))
+ return 0;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
+ goto out_rpm;
+ }
+
+ rq = heartbeat_create(ce, GFP_KERNEL);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ heartbeat_commit(rq, &attr);
+
+ err = 0;
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_heartbeat.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000..5da6d809a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+struct intel_gt;
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
+void intel_gt_park_heartbeats(struct intel_gt *gt);
+void intel_gt_unpark_heartbeats(struct intel_gt *gt);
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
new file mode 100644
index 000000000..b0a4a2dbe
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_ring.h"
+#include "shmem_utils.h"
+
+static void dbg_poison_ce(struct intel_context *ce)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ if (ce->state) {
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ int type = i915_coherent_map_type(ce->engine->i915, obj, true);
+ void *map;
+
+ if (!i915_gem_object_trylock(obj, NULL))
+ return;
+
+ map = i915_gem_object_pin_map(obj, type);
+ if (!IS_ERR(map)) {
+ memset(map, CONTEXT_REDZONE, obj->base.size);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+ }
+ i915_gem_object_unlock(obj);
+ }
+}
+
+static int __engine_unpark(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+ struct intel_context *ce;
+
+ ENGINE_TRACE(engine, "\n");
+
+ intel_gt_pm_get(engine->gt);
+
+ /* Discard stale context state from across idling */
+ ce = engine->kernel_context;
+ if (ce) {
+ GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
+
+ /* Flush all pending HW writes before we touch the context */
+ while (unlikely(intel_context_inflight(ce)))
+ intel_engine_flush_submission(engine);
+
+ /* First poison the image to verify we never fully trust it */
+ dbg_poison_ce(ce);
+
+ /* Scrub the context image after our loss of control */
+ ce->ops->reset(ce);
+
+ CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
+ ce->timeline->seqno,
+ READ_ONCE(*ce->timeline->hwsp_seqno),
+ ce->ring->emit);
+ GEM_BUG_ON(ce->timeline->seqno !=
+ READ_ONCE(*ce->timeline->hwsp_seqno));
+ }
+
+ if (engine->unpark)
+ engine->unpark(engine);
+
+ intel_breadcrumbs_unpark(engine->breadcrumbs);
+ intel_engine_unpark_heartbeat(engine);
+ return 0;
+}
+
+static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_request *rq = to_request(fence);
+
+ ewma__engine_latency_add(&rq->engine->latency,
+ ktime_us_delta(rq->fence.timestamp,
+ rq->duration.emitted));
+}
+
+static void
+__queue_and_release_pm(struct i915_request *rq,
+ struct intel_timeline *tl,
+ struct intel_engine_cs *engine)
+{
+ struct intel_gt_timelines *timelines = &engine->gt->timelines;
+
+ ENGINE_TRACE(engine, "parking\n");
+
+ /*
+ * We have to serialise all potential retirement paths with our
+ * submission, as we don't want to underflow either the
+ * engine->wakeref.counter or our timeline->active_count.
+ *
+ * Equally, we cannot allow a new submission to start until
+ * after we finish queueing, nor could we allow that submitter
+ * to retire us before we are ready!
+ */
+ spin_lock(&timelines->lock);
+
+ /* Let intel_gt_retire_requests() retire us (acquired under lock) */
+ if (!atomic_fetch_inc(&tl->active_count))
+ list_add_tail(&tl->link, &timelines->active_list);
+
+ /* Hand the request over to HW and so engine_retire() */
+ __i915_request_queue_bh(rq);
+
+ /* Let new submissions commence (and maybe retire this timeline) */
+ __intel_wakeref_defer_park(&engine->wakeref);
+
+ spin_unlock(&timelines->lock);
+}
+
+static bool switch_to_kernel_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ bool result = true;
+
+ /*
+ * This is execlist specific behaviour intended to ensure the GPU is
+ * idle by switching to a known 'safe' context. With GuC submission, the
+ * same idle guarantee is achieved by other means (disabling
+ * scheduling). Further, switching to a 'safe' context has no effect
+ * with GuC submission as the scheduler can just switch back again.
+ *
+ * FIXME: Move this backend scheduler specific behaviour into the
+ * scheduler backend.
+ */
+ if (intel_engine_uses_guc(engine))
+ return true;
+
+ /* GPU is pointing to the void, as good as in the kernel context. */
+ if (intel_gt_is_wedged(engine->gt))
+ return true;
+
+ GEM_BUG_ON(!intel_context_is_barrier(ce));
+ GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
+
+ /* Already inside the kernel context, safe to power down. */
+ if (engine->wakeref_serial == engine->serial)
+ return true;
+
+ /*
+ * Note, we do this without taking the timeline->mutex. We cannot
+ * as we may be called while retiring the kernel context and so
+ * already underneath the timeline->mutex. Instead we rely on the
+ * exclusive property of the __engine_park that prevents anyone
+ * else from creating a request on this engine. This also requires
+ * that the ring is empty and we avoid any waits while constructing
+ * the context, as they assume protection by the timeline->mutex.
+ * This should hold true as we can only park the engine after
+ * retiring the last request, thus all rings should be empty and
+ * all timelines idle.
+ *
+ * For unlocking, there are 2 other parties and the GPU who have a
+ * stake here.
+ *
+ * A new gpu user will be waiting on the engine-pm to start their
+ * engine_unpark. New waiters are predicated on engine->wakeref.count
+ * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
+ * engine->wakeref.
+ *
+ * The other party is intel_gt_retire_requests(), which is walking the
+ * list of active timelines looking for completions. Meanwhile as soon
+ * as we call __i915_request_queue(), the GPU may complete our request.
+ * Ergo, if we put ourselves on the timelines.active_list
+ * (see intel_timeline_enter()) before we increment the
+ * engine->wakeref.count, we may see the request completion and retire
+ * it, causing an underflow of the engine->wakeref.
+ */
+ set_bit(CONTEXT_IS_PARKING, &ce->flags);
+ GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
+
+ rq = __i915_request_create(ce, GFP_NOWAIT);
+ if (IS_ERR(rq))
+ /* Context switch failed, hope for the best! Maybe reset? */
+ goto out_unlock;
+
+ /* Check again on the next retirement. */
+ engine->wakeref_serial = engine->serial + 1;
+ i915_request_add_active_barriers(rq);
+
+ /* Install ourselves as a preemption barrier */
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
+ /*
+ * Use an interrupt for precise measurement of duration,
+ * otherwise we rely on someone else retiring all the requests
+ * which may delay the signaling (i.e. we will likely wait
+ * until the background request retirement worker, which runs
+ * every second or two).
+ */
+ BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
+ dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
+ rq->duration.emitted = ktime_get();
+ }
+
+ /* Expose ourselves to the world */
+ __queue_and_release_pm(rq, ce->timeline, engine);
+
+ result = false;
+out_unlock:
+ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
+ return result;
+}
+
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct dma_fence_cb *cb =
+ container_of((struct list_head *)node,
+ typeof(*cb), node);
+
+ cb->func(ERR_PTR(-EAGAIN), cb);
+ }
+}
+
+static int __engine_park(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+
+ engine->saturated = 0;
+
+ /*
+ * If one and only one request is completed between pm events,
+ * we know that we are inside the kernel context and it is
+ * safe to power down. (We are paranoid in case that runtime
+ * suspend causes corruption to the active context image, and
+ * want to avoid that impacting userspace.)
+ */
+ if (!switch_to_kernel_context(engine))
+ return -EBUSY;
+
+ ENGINE_TRACE(engine, "parked\n");
+
+ call_idle_barriers(engine); /* cleanup after wedging */
+
+ intel_engine_park_heartbeat(engine);
+ intel_breadcrumbs_park(engine->breadcrumbs);
+
+ /* Must be reset upon idling, or we may miss the busy wakeup. */
+ GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
+
+ if (engine->park)
+ engine->park(engine);
+
+ /* While gt calls i915_vma_parked(), we have to break the lock cycle */
+ intel_gt_pm_put_async(engine->gt);
+ return 0;
+}
+
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __engine_unpark,
+ .put = __engine_park,
+};
+
+void intel_engine_init__pm(struct intel_engine_cs *engine)
+{
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
+
+ intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_engine_init_heartbeat(engine);
+}
+
+/**
+ * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
+ * an engine.
+ * @engine: The engine whose pinned contexts we want to reset.
+ *
+ * Typically the pinned context LMEM images lose their content or have it
+ * corrupted on suspend. This function resets their images.
+ */
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link) {
+ /* kernel context gets reset at __engine_unpark() */
+ if (ce == engine->kernel_context)
+ continue;
+
+ dbg_poison_ce(ce);
+ ce->ops->reset(ce);
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
new file mode 100644
index 000000000..d68675925
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_PM_H
+#define INTEL_ENGINE_PM_H
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+#include "intel_gt_pm.h"
+
+static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+ return intel_wakeref_is_active(&engine->wakeref);
+}
+
+static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ __intel_wakeref_get(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->wakeref);
+}
+
+static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+ return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_get(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_get(&tengine->wakeref);
+ }
+ intel_gt_pm_might_get(engine->gt);
+}
+
+static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put_async(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
+static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
+{
+ intel_wakeref_unlock_wait(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_put(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_put(&tengine->wakeref);
+ }
+ intel_gt_pm_might_put(engine->gt);
+}
+
+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /*
+ * The engine->kernel_context is special as it is used inside
+ * the engine-pm barrier (see __engine_park()), circumventing
+ * the usual mutexes and relying on the engine-pm barrier
+ * instead. So whenever we use the engine->kernel_context
+ * outside of the barrier, we must manually handle the
+ * engine wakeref to serialise with the use inside.
+ */
+ intel_engine_pm_get(engine);
+ rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
+
+ return rq;
+}
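A hedged sketch of a typical caller follows; the wrapper name is an illustrative assumption and i915_request_add() is taken to be the usual submission path used elsewhere in the driver.

/* Illustrative only: emit an empty request on the kernel context. */
static inline int example_emit_kernel_nop(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_add(rq);
	return 0;
}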
+
+void intel_engine_init__pm(struct intel_engine_cs *engine);
+
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
new file mode 100644
index 000000000..fe1a0d5fd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_REGS__
+#define __INTEL_ENGINE_REGS__
+
+#include "i915_reg_defs.h"
+
+#define RING_EXCC(base) _MMIO((base) + 0x28)
+#define RING_TAIL(base) _MMIO((base) + 0x30)
+#define TAIL_ADDR 0x001FFFF8
+#define RING_HEAD(base) _MMIO((base) + 0x34)
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START(base) _MMIO((base) + 0x38)
+#define RING_CTL(base) _MMIO((base) + 0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+#define RING_SYNC_0(base) _MMIO((base) + 0x40)
+#define RING_SYNC_1(base) _MMIO((base) + 0x44)
+#define RING_SYNC_2(base) _MMIO((base) + 0x48)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
+#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
+#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
+#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
+#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
+#define GEN8_RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12)
+#define GEN8_FF_DOP_CLOCK_GATE_DISABLE REG_BIT(10)
+#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
+#define GEN6_BSD_GO_INDICATOR REG_BIT(4)
+#define GEN6_BSD_SLEEP_INDICATOR REG_BIT(3)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE REG_BIT(2)
+#define GEN6_PSMI_SLEEP_MSG_DISABLE REG_BIT(0)
+#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
+#define PWRCTX_MAXCNT(base) _MMIO((base) + 0x54)
+#define IDLE_TIME_MASK 0xFFFFF
+#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c)
+#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */
+#define RING_IPEIR(base) _MMIO((base) + 0x64)
+#define RING_IPEHR(base) _MMIO((base) + 0x68)
+#define RING_INSTDONE(base) _MMIO((base) + 0x6c)
+#define RING_INSTPS(base) _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base) _MMIO((base) + 0x78)
+#define RING_ACTHD(base) _MMIO((base) + 0x74)
+#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
+#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84)
+#define IPEIR(base) _MMIO((base) + 0x88)
+#define IPEHR(base) _MMIO((base) + 0x8c)
+#define RING_ID(base) _MMIO((base) + 0x8c)
+#define RING_NOPID(base) _MMIO((base) + 0x94)
+#define RING_HWSTAM(base) _MMIO((base) + 0x98)
+#define RING_MI_MODE(base) _MMIO((base) + 0x9c)
+#define ASYNC_FLIP_PERF_DISABLE REG_BIT(14)
+#define MI_FLUSH_ENABLE REG_BIT(12)
+#define TGL_NESTED_BB_EN REG_BIT(12)
+#define MODE_IDLE REG_BIT(9)
+#define STOP_RING REG_BIT(8)
+#define VS_TIMER_DISPATCH REG_BIT(6)
+#define RING_IMR(base) _MMIO((base) + 0xa8)
+#define RING_EIR(base) _MMIO((base) + 0xb0)
+#define RING_EMR(base) _MMIO((base) + 0xb4)
+#define RING_ESR(base) _MMIO((base) + 0xb8)
+#define RING_INSTPM(base) _MMIO((base) + 0xc0)
+#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4)
+#define ACTHD(base) _MMIO((base) + 0xc8)
+#define GEN8_R_PWR_CLK_STATE(base) _MMIO((base) + 0xc8)
+#define GEN8_RPCS_ENABLE (1 << 31)
+#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
+#define GEN8_RPCS_S_CNT_SHIFT 15
+#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define GEN11_RPCS_S_CNT_SHIFT 12
+#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT)
+#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
+#define GEN8_RPCS_SS_CNT_SHIFT 8
+#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define GEN8_RPCS_EU_MAX_SHIFT 4
+#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define GEN8_RPCS_EU_MIN_SHIFT 0
+#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
+#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
+#define RESET_CTL_CAT_ERROR REG_BIT(2)
+#define RESET_CTL_READY_TO_RESET REG_BIT(1)
+#define RESET_CTL_REQUEST_RESET REG_BIT(0)
+#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
+#define RING_BBSTATE(base) _MMIO((base) + 0x110)
+#define RING_BB_PPGTT (1 << 5)
+#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BB_OFFSET(base) _MMIO((base) + 0x158)
+#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
+#define CCID(base) _MMIO((base) + 0x180)
+#define CCID_EN BIT(0)
+#define CCID_EXTENDED_STATE_RESTORE BIT(2)
+#define CCID_EXTENDED_STATE_SAVE BIT(3)
+#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define ECOSKPD(base) _MMIO((base) + 0x1d0)
+#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
+#define ECO_GATING_CX_ONLY REG_BIT(3)
+#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
+#define ECO_FLIP_DONE REG_BIT(0)
+#define GEN6_BLITTER_LOCK_SHIFT 16
+
+#define BLIT_CCTL(base) _MMIO((base) + 0x204)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \
+ BLIT_CCTL_SRC_MOCS_MASK)
+#define BLIT_CCTL_MOCS(dst, src) \
+ (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1))
+
+#define RING_CSCMDOP(base) _MMIO((base) + 0x20c)
+
+/*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+ * 6:0 == default MOCS value for reads => 6:1 == table index for reads.
+ * 13:7 == default MOCS value for writes => 13:8 == table index for writes.
+ * 15:14 == Reserved => 31:30 are set to 0.
+ */
+#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7)
+#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0)
+#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \
+ CMD_CCTL_READ_OVERRIDE_MASK)
+#define CMD_CCTL_MOCS_OVERRIDE(write, read) \
+ (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \
+ REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
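A brief worked example of the encoding may help; the table index 3 used here is purely illustrative.

/*
 * Illustrative only: a read/write MOCS table index of 3 becomes the
 * register value 6 (index << 1), leaving the encryption lsb of each
 * field clear, so bits 13:8 and 6:1 end up holding the table index.
 */
u32 example_cmd_cctl = CMD_CCTL_MOCS_OVERRIDE(3, 3);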
+
+#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8) /* gen12+ */
+
+#define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc)
+#define LOWER_SLICE_ENABLED (1 << 0)
+#define LOWER_SLICE_DISABLED (0 << 0)
+#define MI_PREDICATE_SRC0(base) _MMIO((base) + 0x400)
+#define MI_PREDICATE_SRC0_UDW(base) _MMIO((base) + 0x400 + 4)
+#define MI_PREDICATE_SRC1(base) _MMIO((base) + 0x408)
+#define MI_PREDICATE_SRC1_UDW(base) _MMIO((base) + 0x408 + 4)
+#define MI_PREDICATE_DATA(base) _MMIO((base) + 0x410)
+#define MI_PREDICATE_RESULT(base) _MMIO((base) + 0x418)
+#define MI_PREDICATE_RESULT_1(base) _MMIO((base) + 0x41c)
+
+#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CTX_SR_CTL(base) _MMIO((base) + 0x244)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
+#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
+#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c)
+#define GFX_RUN_LIST_ENABLE (1 << 15)
+#define GFX_INTERRUPT_STEERING (1 << 14)
+#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
+#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
+#define GFX_REPLAY_MODE (1 << 11)
+#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GEN12_GFX_PREFETCH_DISABLE REG_BIT(10)
+#define GFX_PPGTT_ENABLE (1 << 9)
+#define GEN8_GFX_PPGTT_48B (1 << 7)
+#define GFX_FORWARD_VBLANK_MASK (3 << 5)
+#define GFX_FORWARD_VBLANK_NEVER (0 << 5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5)
+#define GFX_FORWARD_VBLANK_COND (2 << 5)
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
+#define RING_TIMESTAMP(base) _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
+#define RING_PREDICATE_RESULT(base) _MMIO((base) + 0x3b8)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28)
+#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
+#define RING_FORCE_TO_NONPRIV_MASK_VALID \
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK | \
+ RING_FORCE_TO_NONPRIV_ACCESS_MASK | \
+ RING_FORCE_TO_NONPRIV_DENY)
+#define RING_MAX_NONPRIV_SLOTS 12
+
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
+#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
+#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+
+#define GEN11_VCS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x88c)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(base) _MMIO((base) + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x201c)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(base) _MMIO((base) + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
+#define GEN11_VECS_SFC_USAGE(base) _MMIO((base) + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+
+#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+
+#define GEN12_HCP_SFC_LOCK_STATUS(base) _MMIO((base) + 0x2914)
+#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
+#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
+
+#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
+#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+
+#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
+#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+
+
+#endif /* __INTEL_ENGINE_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
new file mode 100644
index 000000000..8e762d683
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ unsigned long flags;
+
+ if (stats->active) {
+ stats->active++;
+ return;
+ }
+
+ /* The writer is serialised; but the pmu reader may be from hardirq */
+ local_irq_save(flags);
+ write_seqcount_begin(&stats->lock);
+
+ stats->start = ktime_get();
+ stats->active++;
+
+ write_seqcount_end(&stats->lock);
+ local_irq_restore(flags);
+
+ GEM_BUG_ON(!stats->active);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ unsigned long flags;
+
+ GEM_BUG_ON(!stats->active);
+ if (stats->active > 1) {
+ stats->active--;
+ return;
+ }
+
+ local_irq_save(flags);
+ write_seqcount_begin(&stats->lock);
+
+ stats->active--;
+ stats->total = ktime_add(stats->total,
+ ktime_sub(ktime_get(), stats->start));
+
+ write_seqcount_end(&stats->lock);
+ local_irq_restore(flags);
+}
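The matching reader lives elsewhere in the driver; below is a hedged sketch of the seqcount retry loop such a reader relies on, assuming only the fields declared for struct intel_engine_execlists_stats.

/*
 * Illustrative only: read the accumulated busy time consistently
 * against the writers above, extending it by the current block if
 * the engine is active at the time of sampling.
 */
static inline ktime_t
example_read_busy_total(struct intel_engine_execlists_stats *stats)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&stats->lock);
		total = stats->total;
		if (stats->active)
			total = ktime_add(total,
					  ktime_sub(ktime_get(), stats->start));
	} while (read_seqcount_retry(&stats->lock, seq));

	return total;
}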
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
new file mode 100644
index 000000000..107f465a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -0,0 +1,666 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_TYPES__
+#define __INTEL_ENGINE_TYPES__
+
+#include <linux/average.h>
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/rbtree.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "i915_gem.h"
+#include "i915_pmu.h"
+#include "i915_priolist_types.h"
+#include "i915_selftest.h"
+#include "intel_sseu.h"
+#include "intel_timeline_types.h"
+#include "intel_uncore.h"
+#include "intel_wakeref.h"
+#include "intel_workarounds_types.h"
+
+/* HW Engine class + instance */
+#define RENDER_CLASS 0
+#define VIDEO_DECODE_CLASS 1
+#define VIDEO_ENHANCEMENT_CLASS 2
+#define COPY_ENGINE_CLASS 3
+#define OTHER_CLASS 4
+#define COMPUTE_CLASS 5
+#define MAX_ENGINE_CLASS 5
+#define MAX_ENGINE_INSTANCE 8
+
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 8
+
+#define I915_CMD_HASH_ORDER 9
+
+struct dma_fence;
+struct drm_i915_gem_object;
+struct drm_i915_reg_table;
+struct i915_gem_context;
+struct i915_request;
+struct i915_sched_attr;
+struct i915_sched_engine;
+struct intel_gt;
+struct intel_ring;
+struct intel_uncore;
+struct intel_breadcrumbs;
+
+typedef u32 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
+
+struct intel_hw_status_page {
+ struct list_head timelines;
+ struct i915_vma *vma;
+ u32 *addr;
+};
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 slice_common_extra[2];
+ u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+ u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+
+ /* Added in XeHPG */
+ u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+};
+
+/*
+ * we use a single page to load ctx workarounds so all of these
+ * values are referred in terms of dwords
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies batch starting position, also helpful in case
+ * we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct i915_vma *vma;
+};
+
+#define I915_MAX_VCS 8
+#define I915_MAX_VECS 4
+#define I915_MAX_SFC (I915_MAX_VCS / 2)
+#define I915_MAX_CCS 4
+#define I915_MAX_RCS 1
+#define I915_MAX_BCS 9
+
+/*
+ * Engine IDs definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+ RCS0 = 0,
+ BCS0,
+ BCS1,
+ BCS2,
+ BCS3,
+ BCS4,
+ BCS5,
+ BCS6,
+ BCS7,
+ BCS8,
+#define _BCS(n) (BCS0 + (n))
+ VCS0,
+ VCS1,
+ VCS2,
+ VCS3,
+ VCS4,
+ VCS5,
+ VCS6,
+ VCS7,
+#define _VCS(n) (VCS0 + (n))
+ VECS0,
+ VECS1,
+ VECS2,
+ VECS3,
+#define _VECS(n) (VECS0 + (n))
+ CCS0,
+ CCS1,
+ CCS2,
+ CCS3,
+#define _CCS(n) (CCS0 + (n))
+ I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
+};
+
+/* A simple estimator for the round-trip latency of an engine */
+DECLARE_EWMA(_engine_latency, 6, 4)
+
+struct st_preempt_hang {
+ struct completion completion;
+ unsigned int count;
+};
+
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @timer: kick the current context if its timeslice expires
+ */
+ struct timer_list timer;
+
+ /**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout may be chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
+ * @ccid: identifier for contexts submitted to this engine
+ */
+ u32 ccid;
+
+ /**
+ * @yield: CCID at the time of the last semaphore-wait interrupt.
+ *
+ * Instead of leaving a semaphore busy-spinning on an engine, we would
+ * like to switch to another ready context, i.e. yielding the semaphore
+ * timeslice.
+ */
+ u32 yield;
+
+ /**
+ * @error_interrupt: CS Master EIR
+ *
+ * The CS generates an interrupt when it detects an error. We capture
+ * the first error interrupt, record the EIR and schedule the tasklet.
+ * In the tasklet, we process the pending CS events to ensure we have
+ * the guilty request, and then reset the engine.
+ *
+ * Low 16b are used by HW, with the upper 16b used as the enabling mask.
+ * Reserve the upper 16b for tracking internal errors.
+ */
+ u32 error_interrupt;
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
+
+ /**
+ * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
+ */
+ u32 reset_ccid;
+
+ /**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+#define EXECLIST_MAX_PORTS 2
+ /**
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
+ /**
+ * @inflight: the set of contexts submitted and acknowledged by HW
+ *
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
+ */
+ struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
+ /**
+ * @pending: the next set of contexts submitted to ELSP
+ *
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
+ */
+ struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @virtual: Queue of requests on a virtual engine, sorted by priority.
+ * Each RB entry is a struct i915_priolist containing a list of requests
+ * of the same priority.
+ */
+ struct rb_root_cached virtual;
+
+ /**
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
+ */
+ u32 *csb_write;
+
+ /**
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these register may be either mmio or HWSP shadow.
+ */
+ u64 *csb_status;
+
+ /**
+ * @csb_size: context status buffer FIFO size
+ */
+ u8 csb_size;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
+struct intel_engine_execlists_stats {
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_t lock;
+
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases where
+ * the engine is currently busy (active > 0).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active as active > 0.
+ */
+ ktime_t start;
+};
+
+struct intel_engine_guc_stats {
+ /**
+ * @running: Active state of the engine when busyness was last sampled.
+ */
+ bool running;
+
+ /**
+ * @prev_total: Previous value of total runtime clock cycles.
+ */
+ u32 prev_total;
+
+ /**
+ * @total_gt_clks: Total gt clock cycles this engine was busy.
+ */
+ u64 total_gt_clks;
+
+ /**
+ * @start_gt_clk: GT clock time of last idle to active transition.
+ */
+ u64 start_gt_clk;
+};
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
+ struct intel_gt *gt;
+ struct intel_uncore *uncore;
+ char name[INTEL_ENGINE_CS_MAX_NAME];
+
+ enum intel_engine_id id;
+ enum intel_engine_id legacy_idx;
+
+ unsigned int guc_id;
+
+ intel_engine_mask_t mask;
+ u32 reset_domain;
+ /**
+ * @logical_mask: logical mask of engine, reported to user space via
+ * query IOCTL and used to communicate with the GuC in logical space.
+ * The logical instance of a physical engine can change based on product
+ * and fusing.
+ */
+ intel_engine_mask_t logical_mask;
+
+ u8 class;
+ u8 instance;
+
+ u16 uabi_class;
+ u16 uabi_instance;
+
+ u32 uabi_capabilities;
+ u32 context_size;
+ u32 mmio_base;
+
+ /*
+ * Some w/a require forcewake to be held (which prevents RC6) while
+ * a particular engine is active. If so, we set fw_domain to the
+ * domains that need to be held for the duration of request activity,
+ * and 0 if none. We try to limit the duration of the hold as much
+ * as possible.
+ */
+ enum forcewake_domains fw_domain;
+ unsigned int fw_active;
+
+ unsigned long context_tag;
+
+ struct rb_node uabi_node;
+
+ struct intel_sseu sseu;
+
+ struct i915_sched_engine *sched_engine;
+
+ /* keep a request in reserve for a [pm] barrier under oom */
+ struct i915_request *request_pool;
+
+ struct intel_context *hung_ce;
+
+ struct llist_head barrier_tasks;
+
+ struct intel_context *kernel_context; /* pinned */
+
+ /**
+ * pinned_contexts_list: List of pinned contexts. This list is only
+ * assumed to be manipulated during driver load- or unload time and
+ * therefore does not have any additional protection.
+ */
+ struct list_head pinned_contexts_list;
+
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
+ struct {
+ struct delayed_work work;
+ struct i915_request *systole;
+ unsigned long blocked;
+ } heartbeat;
+
+ unsigned long serial;
+
+ unsigned long wakeref_serial;
+ struct intel_wakeref wakeref;
+ struct file *default_state;
+
+ struct {
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+ } legacy;
+
+ /*
+ * We track the average duration of the idle pulse on parking the
+ * engine to keep an estimate of how fast the engine is
+ * under ideal conditions.
+ */
+ struct ewma__engine_latency latency;
+
+ /* Keep track of all the seqno used, a trail of breadcrumbs */
+ struct intel_breadcrumbs *breadcrumbs;
+
+ struct intel_engine_pmu {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
+ } pmu;
+
+ struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
+
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *engine);
+ void (*irq_disable)(struct intel_engine_cs *engine);
+ void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);
+
+ void (*sanitize)(struct intel_engine_cs *engine);
+ int (*resume)(struct intel_engine_cs *engine);
+
+ struct {
+ void (*prepare)(struct intel_engine_cs *engine);
+
+ void (*rewind)(struct intel_engine_cs *engine, bool stalled);
+ void (*cancel)(struct intel_engine_cs *engine);
+
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
+
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
+ void (*bump_serial)(struct intel_engine_cs *engine);
+
+ void (*set_default_submission)(struct intel_engine_cs *engine);
+
+ const struct intel_context_ops *cops;
+
+ int (*request_alloc)(struct i915_request *rq);
+
+ int (*emit_flush)(struct i915_request *request, u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+ int (*emit_init_breadcrumb)(struct i915_request *rq);
+ u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
+ u32 *cs);
+ unsigned int emit_fini_breadcrumb_dw;
+
+ /* Pass the request to the hardware queue (e.g. directly into
+ * the legacy ringbuffer or to the end of an execlist).
+ *
+ * This is called from an atomic context with irqs disabled; must
+ * be irq safe.
+ */
+ void (*submit_request)(struct i915_request *rq);
+
+ void (*release)(struct intel_engine_cs *engine);
+
+ /*
+ * Add / remove request from engine active tracking
+ */
+ void (*add_active_request)(struct i915_request *rq);
+ void (*remove_active_request)(struct i915_request *rq);
+
+ /*
+ * Get engine busyness and the time at which the busyness was sampled.
+ */
+ ktime_t (*busyness)(struct intel_engine_cs *engine,
+ ktime_t *now);
+
+ struct intel_engine_execlists execlists;
+
+ /*
+ * Keep track of completed timelines on this engine for early
+ * retirement with the goal of quickly enabling powersaving as
+ * soon as the engine is idle.
+ */
+ struct intel_timeline *retire;
+ struct work_struct retire_work;
+
+ /* status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
+#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+#define I915_ENGINE_HAS_TIMESLICES BIT(4)
+#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8)
+#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
+#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
+#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
+#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
+ unsigned int flags;
+
+ /*
+ * Table of commands the command parser needs to know about
+ * for this engine.
+ */
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the engine's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-engine length
+ * field encoding for the command (i.e. different opcode ranges use
+ * certain bits to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ union {
+ struct intel_engine_execlists_stats execlists;
+ struct intel_engine_guc_stats guc;
+ };
+
+ /**
+ * @rps: Utilisation at last RPS sampling.
+ */
+ ktime_t rps;
+ } stats;
+
+ struct {
+ unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
+ unsigned long preempt_timeout_ms;
+ unsigned long stop_timeout_ms;
+ unsigned long timeslice_duration_ms;
+ } props, defaults;
+
+ I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
+};
+
+static inline bool
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool
+intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+}
+
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
+ return false;
+
+ return engine->flags & I915_ENGINE_HAS_TIMESLICES;
+}
+
+static inline bool
+intel_engine_is_virtual(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_IS_VIRTUAL;
+}
+
+static inline bool
+intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
+{
+ return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
+}
+
+/* Wa_14014475959:dg2 */
+static inline bool
+intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+}
+
+#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
new file mode 100644
index 000000000..46a174f8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/llist.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_user.h"
+#include "intel_gt.h"
+#include "uc/intel_guc_submission.h"
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ struct rb_node *p = i915->uabi_engines.rb_node;
+
+ while (p) {
+ struct intel_engine_cs *it =
+ rb_entry(p, typeof(*it), uabi_node);
+
+ if (class < it->uabi_class)
+ p = p->rb_left;
+ else if (class > it->uabi_class ||
+ instance > it->uabi_instance)
+ p = p->rb_right;
+ else if (instance < it->uabi_instance)
+ p = p->rb_left;
+ else
+ return it;
+ }
+
+ return NULL;
+}
+
+void intel_engine_add_user(struct intel_engine_cs *engine)
+{
+ llist_add((struct llist_node *)&engine->uabi_node,
+ (struct llist_head *)&engine->i915->uabi_engines);
+}
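+
+/*
+ * A note on the casts above and in sort_engines()/intel_engines_driver_register()
+ * below: the storage behind engine->uabi_node is reused across the registration
+ * phases. Engines are first collected lock-free on an llist, then spliced onto a
+ * list_head for list_sort(), and finally linked into the uabi_engines rbtree that
+ * intel_engine_lookup_user() walks. The phases run strictly one after another,
+ * so only one node type is live at any time.
+ */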
+
+static const u8 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+ [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
+};
+
+static int engine_cmp(void *priv, const struct list_head *A,
+ const struct list_head *B)
+{
+ const struct intel_engine_cs *a =
+ container_of((struct rb_node *)A, typeof(*a), uabi_node);
+ const struct intel_engine_cs *b =
+ container_of((struct rb_node *)B, typeof(*b), uabi_node);
+
+ if (uabi_classes[a->class] < uabi_classes[b->class])
+ return -1;
+ if (uabi_classes[a->class] > uabi_classes[b->class])
+ return 1;
+
+ if (a->instance < b->instance)
+ return -1;
+ if (a->instance > b->instance)
+ return 1;
+
+ return 0;
+}
+
+static struct llist_node *get_engines(struct drm_i915_private *i915)
+{
+ return llist_del_all((struct llist_head *)&i915->uabi_engines);
+}
+
+static void sort_engines(struct drm_i915_private *i915,
+ struct list_head *engines)
+{
+ struct llist_node *pos, *next;
+
+ llist_for_each_safe(pos, next, get_engines(i915)) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)pos, typeof(*engine),
+ uabi_node);
+ list_add((struct list_head *)&engine->uabi_node, engines);
+ }
+ list_sort(NULL, engines, engine_cmp);
+}
+
+static void set_scheduler_caps(struct drm_i915_private *i915)
+{
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(HAS_PREEMPTION, PREEMPTION),
+ MAP(HAS_SEMAPHORES, SEMAPHORES),
+ MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_uabi_engine(engine, i915) { /* all engines must agree! */
+ int i;
+
+ if (engine->sched_engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
+}
+
+const char *intel_engine_class_repr(u8 class)
+{
+ static const char * const uabi_names[] = {
+ [RENDER_CLASS] = "rcs",
+ [COPY_ENGINE_CLASS] = "bcs",
+ [VIDEO_DECODE_CLASS] = "vcs",
+ [VIDEO_ENHANCEMENT_CLASS] = "vecs",
+ [COMPUTE_CLASS] = "ccs",
+ };
+
+ if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
+ return "xxx";
+
+ return uabi_names[class];
+}
+
+struct legacy_ring {
+ struct intel_gt *gt;
+ u8 class;
+ u8 instance;
+};
+
+static int legacy_ring_idx(const struct legacy_ring *ring)
+{
+ static const struct {
+ u8 base, max;
+ } map[] = {
+ [RENDER_CLASS] = { RCS0, 1 },
+ [COPY_ENGINE_CLASS] = { BCS0, 1 },
+ [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
+ [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
+ [COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
+ };
+
+ if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
+ return INVALID_ENGINE;
+
+ if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
+ return INVALID_ENGINE;
+
+ return map[ring->class].base + ring->instance;
+}
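+
+/*
+ * For example (illustrative only): { class = VIDEO_DECODE_CLASS, instance = 2 }
+ * resolves to VCS0 + 2, while an unknown class or an instance beyond the class's
+ * max is flagged by GEM_DEBUG_WARN_ON() and reported as INVALID_ENGINE.
+ */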
+
+static void add_legacy_ring(struct legacy_ring *ring,
+ struct intel_engine_cs *engine)
+{
+ if (engine->gt != ring->gt || engine->class != ring->class) {
+ ring->gt = engine->gt;
+ ring->class = engine->class;
+ ring->instance = 0;
+ }
+
+ engine->legacy_idx = legacy_ring_idx(ring);
+ if (engine->legacy_idx != INVALID_ENGINE)
+ ring->instance++;
+}
+
+void intel_engines_driver_register(struct drm_i915_private *i915)
+{
+ struct legacy_ring ring = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+ LIST_HEAD(engines);
+
+ sort_engines(i915, &engines);
+
+ prev = NULL;
+ p = &i915->uabi_engines.rb_node;
+ list_for_each_safe(it, next, &engines) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)it, typeof(*engine),
+ uabi_node);
+ char old[sizeof(engine->name)];
+
+ if (intel_gt_has_unrecoverable_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
+
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ engine->uabi_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
+
+ /* Replace the internal name with the final user facing name */
+ memcpy(old, engine->name, sizeof(engine->name));
+ scnprintf(engine->name, sizeof(engine->name), "%s%u",
+ intel_engine_class_repr(engine->class),
+ engine->uabi_instance);
+ DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+
+ GEM_BUG_ON(intel_engine_lookup_user(i915,
+ engine->uabi_class,
+ engine->uabi_instance) != engine);
+
+ /* Fix up the mapping to match default execbuf::user_map[] */
+ add_legacy_ring(&ring, engine);
+
+ prev = &engine->uabi_node;
+ p = &prev->rb_right;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ struct intel_engine_cs *engine;
+ unsigned int isolation;
+ int class, inst;
+ int errors = 0;
+
+ for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
+ for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
+ engine = intel_engine_lookup_user(i915,
+ class, inst);
+ if (!engine) {
+ pr_err("UABI engine not found for { class:%d, instance:%d }\n",
+ class, inst);
+ errors++;
+ continue;
+ }
+
+ if (engine->uabi_class != class ||
+ engine->uabi_instance != inst) {
+ pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
+ engine->name,
+ engine->uabi_class,
+ engine->uabi_instance,
+ class, inst);
+ errors++;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ isolation = intel_engines_has_context_isolation(i915);
+ for_each_uabi_engine(engine, i915) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((isolation & bit) != expected) {
+ pr_err("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ errors++;
+ }
+ }
+
+ if (drm_WARN(&i915->drm, errors,
+ "Invalid UABI engine mapping found"))
+ i915->uabi_engines = RB_ROOT;
+ }
+
+ set_scheduler_caps(i915);
+}
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int which;
+
+ which = 0;
+ for_each_uabi_engine(engine, i915)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
new file mode 100644
index 000000000..3dc7e8ab9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_USER_H
+#define INTEL_ENGINE_USER_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+
+void intel_engine_add_user(struct intel_engine_cs *engine);
+void intel_engines_driver_register(struct drm_i915_private *i915);
+
+const char *intel_engine_class_repr(u8 class);
+
+#endif /* INTEL_ENGINE_USER_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
new file mode 100644
index 000000000..f903ee1ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -0,0 +1,4186 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+/**
+ * DOC: Logical Rings, Logical Ring Contexts and Execlists
+ *
+ * Motivation:
+ * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
+ * These expanded contexts enable a number of new abilities, especially
+ * "Execlists" (also implemented in this file).
+ *
+ * One of the main differences with the legacy HW contexts is that logical
+ * ring contexts incorporate many more things to the context's state, like
+ * PDPs or ringbuffer control registers:
+ *
+ * The reason why PDPs are included in the context is straightforward: as
+ * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
+ * contained there means you don't need to do a ppgtt->switch_mm yourself;
+ * instead, the GPU will do it for you on the context switch.
+ *
+ * But what about the ringbuffer control registers (head, tail, etc.)?
+ * Shouldn't we just need a set of those per engine command streamer? This is
+ * where the name "Logical Rings" starts to make sense: by virtualizing the
+ * rings, the engine cs shifts to a new "ring buffer" with every context
+ * switch. When you want to submit a workload to the GPU you: A) choose your
+ * context, B) find its appropriate virtualized ring, C) write commands to it
+ * and then, finally, D) tell the GPU to switch to that context.
+ *
+ * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
+ * to a context is via a context execution list, ergo "Execlists".
+ *
+ * LRC implementation:
+ * Regarding the creation of contexts, we have:
+ *
+ * - One global default context.
+ * - One local default context for each opened fd.
+ * - One local extra context for each context create ioctl call.
+ *
+ * Now that ringbuffers belong per-context (and not per-engine, like before)
+ * and that contexts are uniquely tied to a given engine (and not reusable,
+ * like before) we need:
+ *
+ * - One ringbuffer per-engine inside each context.
+ * - One backing object per-engine inside each context.
+ *
+ * The global default context starts its life with these new objects fully
+ * allocated and populated. The local default context for each opened fd is
+ * more complex, because we don't know at creation time which engine is going
+ * to use them. To handle this, we have implemented a deferred creation of LR
+ * contexts:
+ *
+ * The local context starts its life as a hollow or blank holder that only
+ * gets populated for a given engine once we receive an execbuffer. If later
+ * on we receive another execbuffer ioctl for the same context but a different
+ * engine, we allocate/populate a new ringbuffer and context backing object and
+ * so on.
+ *
+ * Finally, regarding local contexts created using the ioctl call: as they are
+ * only allowed with the render ring, we can allocate & populate them right
+ * away (no need to defer anything, at least for now).
+ *
+ * Execlists implementation:
+ * Execlists are the new method by which, on gen8+ hardware, workloads are
+ * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
+ * This method works as follows:
+ *
+ * When a request is committed, its commands (the BB start and any leading or
+ * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
+ * for the appropriate context. The tail pointer in the hardware context is not
+ * updated at this time, but instead, kept by the driver in the ringbuffer
+ * structure. A structure representing this request is added to a request queue
+ * for the appropriate engine: this structure contains a copy of the context's
+ * tail after the request was written to the ring buffer and a pointer to the
+ * context itself.
+ *
+ * If the engine's request queue was empty before the request was added, the
+ * queue is processed immediately. Otherwise the queue will be processed during
+ * a context switch interrupt. In any case, elements on the queue will get sent
+ * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
+ * globally unique 20-bit submission ID.
+ *
+ * When execution of a request completes, the GPU updates the context status
+ * buffer with a context complete event and generates a context switch interrupt.
+ * During the interrupt handling, the driver examines the events in the buffer:
+ * for each context complete event, if the announced ID matches that on the head
+ * of the request queue, then that request is retired and removed from the queue.
+ *
+ * After processing, if any requests were retired and the queue is not empty
+ * then a new execution list can be submitted. The two requests at the front of
+ * the queue are next to be submitted but since a context may not occur twice in
+ * an execution list, if subsequent requests have the same ID as the first then
+ * the two requests must be combined. This is done simply by discarding requests
+ * at the head of the queue until either only one request is left (in which case
+ * we use a NULL second context) or the first two requests have unique IDs.
+ *
+ * By always executing the first two requests in the queue the driver ensures
+ * that the GPU is kept as busy as possible. In the case where a single context
+ * completes but a second context is still executing, the request for this second
+ * context will be at the head of the queue when we remove the first one. This
+ * request will then be resubmitted along with a new request for a different context,
+ * which will cause the hardware to continue executing the second request and queue
+ * the new request (the GPU detects the condition of a context getting preempted
+ * with the same context and optimizes the context switch flow by not doing
+ * preemption, but just sampling the new tail pointer).
+ *
+ */
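+
+/*
+ * Illustrative walk-through of the pairing rule above (example only, not extra
+ * driver logic): with a queue tagged by context as [A1, A2, B1, C1], the leading
+ * same-context requests are coalesced and the pair {A (tail of A2), B1} is sent
+ * to the ELSP. Once the context-complete event for A retires A1/A2, the next
+ * submission is {B1, C1}. Had the queue been just [A1, A2], the second element
+ * would have been NULL.
+ */
+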
+#include <linux/interrupt.h>
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "gen8_engine_cs.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_engine_stats.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_regs.h"
+#include "intel_gt_requests.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_mocs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+#include "shmem_utils.h"
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
+#define XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE BIT(1) /* upper csb dword */
+#define XEHP_CSB_SW_CTX_ID_MASK GENMASK(31, 10)
+#define XEHP_IDLE_CTX_ID 0xFFFF
+#define XEHP_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(XEHP_CSB_SW_CTX_ID_MASK, csb_dw) != XEHP_IDLE_CTX_ID)
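+
+/*
+ * Worked example for the GEN12 helpers above: the software context ID occupies
+ * bits 25:15 of a CSB dword. A dword with all of those bits set decodes to
+ * GEN12_IDLE_CTX_ID (0x7FF), i.e. "no context", so GEN12_CSB_CTX_VALID() is
+ * false; a dword carrying, say, ctx id 5 (5 << 15) decodes to 5 != 0x7FF and
+ * is reported as valid.
+ */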
+
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+ struct rcu_work rcu;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+	 * physical engine to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+	 * We keep an rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[];
+};
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
+
+static struct intel_context *
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
+
+static struct i915_request *
+__active_request(const struct intel_timeline * const tl,
+ struct i915_request *rq,
+ int error)
+{
+ struct i915_request *active = rq;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ if (error) {
+ i915_request_set_error_once(rq, error);
+ __i915_request_skip(rq);
+ }
+ active = rq;
+ }
+
+ return active;
+}
+
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ return __active_request(tl, rq, 0);
+}
+
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
+
+static struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static int rq_prio(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->sched.attr.priority);
+}
+
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ return prio;
+}
+
+static int queue_prio(const struct i915_sched_engine *sched_engine)
+{
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&sched_engine->queue);
+ if (!rb)
+ return INT_MIN;
+
+ return to_priolist(rb)->priority;
+}
+
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
+static bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ int last_prio;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+	 * less than the current tail of ELSP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->sched_engine->queue_priority_hint <= last_prio)
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+	 * The highest priority request in the queue cannot be either
+	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+	 * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(engine->sched_engine)) > last_prio;
+}
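+
+/*
+ * A quick illustration of the check above (example values only): with
+ * I915_PRIORITY_NORMAL == 0, a running request at prio 0 yields last_prio == 0,
+ * so a queue_priority_hint of 0 never triggers preemption (FIFO between equals
+ * is preserved), while a hint of 1 or more lets the ELSP[1]/queue comparisons
+ * decide whether a preempt-to-idle cycle is actually worthwhile.
+ */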
+
+__maybe_unused static bool
+assert_priority_queue(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (i915_request_is_active(prev))
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
+}
+
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->sched_engine->lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->sched_engine->requests,
+ sched.link) {
+ if (__i915_request_is_complete(rq)) {
+ list_del_init(&rq->sched.link);
+ continue;
+ }
+
+ __i915_request_unsubmit(rq);
+
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine->sched_engine,
+ prio);
+ }
+ GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
+
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Check in case we rollback so far we wrap [size/2] */
+ if (intel_ring_direction(rq->ring,
+ rq->tail,
+ rq->ring->tail + 8) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
+ active = rq;
+ }
+
+ return active;
+}
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ return __unwind_incomplete_requests(engine);
+}
+
+static void
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
+{
+ /*
+	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
+	 * the compiler should eliminate this function as dead code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+	 * requests so that inter-timeline dependencies (i.e. other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, payload will be scrubbed */
+ if (__i915_request_is_complete(rq))
+ head = rq->tail;
+ else
+ head = __active_request(ce->timeline, rq, -EIO)->head;
+ head = intel_ring_wrap(ce->ring, head);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ lrc_init_regs(ce, engine, true);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static bool bad_request(const struct i915_request *rq)
+{
+ return rq->fence.error && i915_request_started(rq);
+}
+
+static struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->context;
+
+ intel_context_get(ce);
+
+ if (unlikely(intel_context_is_closed(ce) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_exiting(ce);
+
+ if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
+ reset_active(rq, engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "before");
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+ ce->lrc.ccid = ce->tag;
+ } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = ffs(READ_ONCE(engine->context_tag));
+
+ GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+ clear_bit(tag - 1, &engine->context_tag);
+ ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ unsigned int tag = __ffs(engine->context_tag);
+
+ GEM_BUG_ON(tag >= BITS_PER_LONG);
+ __clear_bit(tag, &engine->context_tag);
+ ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
+ ce->lrc.ccid |= engine->execlists.ccid;
+
+ __intel_gt_pm_get(engine->gt);
+ if (engine->fw_domain && !engine->fw_active++)
+ intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
+
+ return engine;
+}
+
+static void execlists_schedule_in(struct i915_request *rq, int idx)
+{
+ struct intel_context * const ce = rq->context;
+ struct intel_engine_cs *old;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = ce->inflight;
+ if (!old)
+ old = __execlists_schedule_in(rq);
+ WRITE_ONCE(ce->inflight, ptr_inc(old));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+}
+
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+ struct intel_engine_cs *engine = rq->engine;
+
+ spin_lock_irq(&engine->sched_engine->lock);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ WRITE_ONCE(rq->engine, &ve->base);
+ ve->base.submit_request(rq);
+
+ spin_unlock_irq(&engine->sched_engine->lock);
+}
+
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ struct intel_engine_cs *engine = rq->engine;
+
+ /*
+ * After this point, the rq may be transferred to a new sibling, so
+ * before we clear ce->inflight make sure that the context has been
+ * removed from the b->signalers and furthermore we need to make sure
+ * that the concurrent iterator in signal_irq_work is no longer
+ * following ce->signal_link.
+ */
+ if (!list_empty(&ce->signals))
+ intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
+
+ /*
+ * This engine is now too busy to run this virtual request, so
+ * see if we can find an alternative engine for it to execute on.
+ * Once a request has become bonded to this engine, we treat it the
+	 * same as any other native request.
+ */
+ if (i915_request_in_priority_queue(rq) &&
+ rq->execution_mask != engine->mask)
+ resubmit_virtual_request(rq, ve);
+
+ if (READ_ONCE(ve->request))
+ tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
+}
+
+static void __execlists_schedule_out(struct i915_request * const rq,
+ struct intel_context * const ce)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ unsigned int ccid;
+
+ /*
+ * NB process_csb() is not under the engine->sched_engine->lock and hence
+ * schedule_out can race with schedule_in meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
+ CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+ GEM_BUG_ON(ce->inflight != engine);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ lrc_check_regs(ce, engine, "after");
+
+ /*
+ * If we have just completed this context, the engine may now be
+ * idle and we want to re-enter powersaving.
+ */
+ if (intel_timeline_is_last(ce->timeline, rq) &&
+ __i915_request_is_complete(rq))
+ intel_engine_add_retire(engine, ce->timeline);
+
+ ccid = ce->lrc.ccid;
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
+ ccid &= XEHP_MAX_CONTEXT_HW_ID;
+ } else {
+ ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+ ccid &= GEN12_MAX_CONTEXT_HW_ID;
+ }
+
+ if (ccid < BITS_PER_LONG) {
+ GEM_BUG_ON(ccid == 0);
+ GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+ __set_bit(ccid - 1, &engine->context_tag);
+ }
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (engine->fw_domain && !--engine->fw_active)
+ intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
+ intel_gt_pm_put_async(engine->gt);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ WRITE_ONCE(ce->inflight, NULL);
+ intel_context_put(ce);
+}
+
+static inline void execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->context;
+
+ trace_i915_request_out(rq);
+
+ GEM_BUG_ON(!ce->inflight);
+ ce->inflight = ptr_dec(ce->inflight);
+ if (!__intel_context_inflight_count(ce->inflight))
+ __execlists_schedule_out(rq, ce);
+
+ i915_request_put(rq);
+}
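+
+/*
+ * Note: ce->inflight carries more than the engine pointer. The ptr_inc()/
+ * ptr_dec() helpers above keep a small submission count in the spare low bits
+ * of the (aligned) pointer, and __intel_context_inflight_count() extracts it,
+ * so __execlists_schedule_out() only runs once the last port reference to the
+ * context has been dropped.
+ */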
+
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+ if (prio > I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_HIGH;
+ else if (prio < I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_LOW;
+ else
+ return GEN12_CTX_PRIORITY_NORMAL;
+}
+
+static u64 execlists_update_context(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ u64 desc;
+ u32 tail, prev;
+
+ desc = ce->lrc.desc;
+ if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+ desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ *
+ * We should never submit the context with the same RING_TAIL twice
+ * just in case we submit an empty ring, which confuses the HW.
+ *
+ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+ * the normal request to be able to always advance the RING_TAIL on
+ * subsequent resubmissions (for lite restore). Should that fail us,
+ * and we try and submit the same tail again, force the context
+ * reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
+ */
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
+ tail = intel_ring_set_tail(rq->ring, rq->tail);
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+ ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+	 * Ostensibly, writes (including the WCB) should be flushed prior to
+	 * an uncached write such as our mmio register access. However, the
+	 * empirical evidence (esp. on Braswell) suggests that the WC write
+	 * into memory may not be visible to the HW prior to the completion
+	 * of the UC register write, and that we may begin execution from the
+	 * context before its image is complete, leading to invalid PD chasing.
+ */
+ wmb();
+
+ ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
+ return desc;
+}
+
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+{
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
+}
+
+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+ if (!rq)
+ return "";
+
+ snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
+ prefix,
+ rq->context->lrc.ccid,
+ rq->fence.context, rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ rq_prio(rq));
+
+ return buf;
+}
+
+static __maybe_unused noinline void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ char __maybe_unused p0[40], p1[40];
+
+ if (!ports[0])
+ return;
+
+ ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ dump_port(p0, sizeof(p0), "", ports[0]),
+ dump_port(p1, sizeof(p1), ", ", ports[1]));
+}
+
+static bool
+reset_in_progress(const struct intel_engine_cs *engine)
+{
+ return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
+}
+
+static __maybe_unused noinline bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+ struct i915_request * const *port, *rq, *prev = NULL;
+ struct intel_context *ce = NULL;
+ u32 ccid = -1;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(engine))
+ return true;
+
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
+ engine->name);
+ return false;
+ }
+
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
+ engine->name, execlists_num_ports(execlists));
+ return false;
+ }
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ce = rq->context;
+
+ if (ccid == ce->lrc.ccid) {
+ GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
+ engine->name,
+ ccid, ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ ccid = ce->lrc.ccid;
+
+ /*
+ * Sentinels are supposed to be the last request so they flush
+ * the current execution off the HW. Check that they are the only
+ * request in the pending submission.
+ *
+ * NB: Due to the async nature of preempt-to-busy and request
+	 * cancellation we need to handle the case where a request
+ * becomes a sentinel in parallel to CSB processing.
+ */
+ if (prev && i915_request_has_sentinel(prev) &&
+ !READ_ONCE(prev->fence.error)) {
+ GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+ prev = rq;
+
+ /*
+ * We want virtual requests to only be in the first slot so
+ * that they are never stuck behind a hog and can be immediately
+ * transferred onto the next idle engine.
+ */
+ if (rq->execution_mask != engine->mask &&
+ port != execlists->pending) {
+ GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
+ continue;
+
+ if (__i915_request_is_complete(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
+ engine->name,
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ ok = false;
+ goto unlock;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
+ }
+
+ return ce;
+}
+
+static void execlists_submit_ports(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned int n;
+
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+ * that all ELSP are drained i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq = execlists->pending[n];
+
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
+ }
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+}
+
+static bool ctx_single_port_submission(const struct intel_context *ce)
+{
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ intel_context_force_single_submission(ce));
+}
+
+static bool can_merge_ctx(const struct intel_context *prev,
+ const struct intel_context *next)
+{
+ if (prev != next)
+ return false;
+
+ if (ctx_single_port_submission(prev))
+ return false;
+
+ return true;
+}
+
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(prev == next);
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (__i915_request_is_complete(next))
+ return true;
+
+ if (unlikely((i915_request_flags(prev) | i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
+ return false;
+
+ if (!can_merge_ctx(prev->context, next->context))
+ return false;
+
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
+ return true;
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *inflight;
+
+ if (!rq)
+ return false;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+	 * hysteresis on the greedy selection algorithm.
+ */
+ inflight = intel_context_inflight(&ve->context);
+ if (inflight && inflight != engine)
+ return false;
+
+ return true;
+}
+
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily cleanup after another engine handled rq */
+ if (!rq || !virtual_matches(ve, rq, engine)) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ return ve;
+ }
+
+ return NULL;
+}
+
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ lrc_update_offsets(&ve->context, engine);
+
+ /*
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ GEM_BUG_ON(i915_request_is_active(w));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
+ rq_prio(rq)));
+}
+
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+ const struct i915_request *rq)
+{
+ /*
+ * Once bitten, forever smitten!
+ *
+ * If the active context ever busy-waited on a semaphore,
+ * it will be treated as a hog until the end of its timeslice (i.e.
+ * until it is scheduled out and replaced by a new submission,
+ * possibly even its own lite-restore). The HW only sends an interrupt
+	 * on the first miss, and we do not know if that semaphore has been
+ * signaled, or even if it is now stuck on another semaphore. Play
+ * safe, yield if it might be stuck -- it will be given a fresh
+ * timeslice in the near future.
+ */
+ return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return false;
+
+ /* If not currently active, or about to switch, wait for next event */
+ if (!rq || __i915_request_is_complete(rq))
+ return false;
+
+ /* We do not need to start the timeslice until after the ACK */
+ if (READ_ONCE(engine->execlists.pending[0]))
+ return false;
+
+ /* If ELSP[1] is occupied, always check to see if worth slicing */
+ if (!list_is_last_rcu(&rq->sched.link,
+ &engine->sched_engine->requests)) {
+ ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ return true;
+ }
+
+ /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+ if (!i915_sched_engine_is_empty(engine->sched_engine)) {
+ ENGINE_TRACE(engine, "timeslice required for queue\n");
+ return true;
+ }
+
+ if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+ ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ const struct intel_engine_execlists *el = &engine->execlists;
+
+ if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+ return false;
+
+ if (!needs_timeslice(engine, rq))
+ return false;
+
+ return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ unsigned long duration;
+
+ /* Disable the timer if there is nothing to switch to */
+ duration = 0;
+ if (needs_timeslice(engine, *el->active)) {
+ /* Avoid continually prolonging an active timeslice */
+ if (timer_active(&el->timer)) {
+ /*
+ * If we just submitted a new ELSP after an old
+ * context, that context may have already consumed
+ * its timeslice, so recheck.
+ */
+ if (!timer_pending(&el->timer))
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+ return;
+ }
+
+ duration = timeslice(engine);
+ }
+
+ set_timer_ms(&el->timer, duration);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!rq)
+ return 0;
+
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
+ return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+}
+
+static bool completed(const struct i915_request *rq)
+{
+ if (i915_request_has_sentinel(rq))
+ return false;
+
+ return __i915_request_is_complete(rq);
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched_engine * const sched_engine = engine->sched_engine;
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last, * const *active;
+ struct virtual_engine *ve;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /*
+ * Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+ * Our goal then is to point each port to the end of a consecutive
+ * sequence of requests as being the most optimal (fewest wake ups
+ * and context switches) submission.
+ */
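+
+	/*
+	 * To make the coalescing above concrete (illustration only): if the
+	 * queue holds reqA1 and reqA2 from context A followed by reqB1 from
+	 * context B, we point one port at context A with RING_TAIL advanced
+	 * past reqA2 (a single submission covers both A requests) and the
+	 * other port at context B with reqB1's tail.
+	 */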
+
+ spin_lock(&sched_engine->lock);
+
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ *
+ */
+ active = execlists->active;
+ while ((last = *active) && completed(last))
+ active++;
+
+ if (last) {
+ if (need_preempt(engine, last)) {
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ sched_engine->queue_priority_hint);
+ record_preemption(execlists);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
+
+ last = NULL;
+ } else if (timeslice_expired(engine, last)) {
+ ENGINE_TRACE(engine,
+ "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+ str_yes_no(timer_expired(&execlists->timer)),
+ last->fence.context, last->fence.seqno,
+ rq_prio(last),
+ sched_engine->queue_priority_hint,
+ str_yes_no(timeslice_yield(execlists, last)));
+
+ /*
+ * Consume this timeslice; ensure we start a new one.
+ *
+ * The timeslice expired, and we will unwind the
+ * running contexts and recompute the next ELSP.
+ * If that submit will be the same pair of contexts
+ * (due to dependency ordering), we will skip the
+ * submission. If we don't cancel the timer now,
+ * we will see that the timer has expired and
+ * reschedule the tasklet; continually until the
+ * next context switch or other preemption event.
+ *
+ * Since we have decided to reschedule based on
+ * consumption of this timeslice, if we submit the
+ * same context again, grant it a full timeslice.
+ */
+ cancel_timer(&execlists->timer);
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (active[1]) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ spin_unlock(&sched_engine->lock);
+ return;
+ }
+ }
+ }
+
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.sched_engine->lock);
+
+ rq = ve->request;
+ if (unlikely(!virtual_matches(ve, rq, engine)))
+ goto unlock; /* lost the race to a sibling */
+
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->context != &ve->context);
+
+ if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
+ spin_unlock(&ve->base.sched_engine->lock);
+ break;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.sched_engine->lock);
+ spin_unlock(&engine->sched_engine->lock);
+ return; /* leave this for another sibling */
+ }
+
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ __i915_request_is_complete(rq) ? "!" :
+ __i915_request_has_started(rq) ? "*" :
+ "",
+ str_yes_no(engine != ve->siblings[0]));
+
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
+
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
+
+ if (__i915_request_submit(rq)) {
+ /*
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
+ */
+ virtual_xfer_context(ve, engine);
+ GEM_BUG_ON(ve->siblings[0] != engine);
+
+ submit = true;
+ last = rq;
+ }
+
+ i915_request_put(rq);
+unlock:
+ spin_unlock(&ve->base.sched_engine->lock);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
+ }
+
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+
+ priolist_for_each_request_consume(rq, rn, p) {
+ bool merge = true;
+
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
+ */
+ if (last && !can_merge_rq(last, rq)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port == last_port)
+ goto done;
+
+ /*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->context == rq->context)
+ goto done;
+
+ if (i915_request_has_sentinel(last))
+ goto done;
+
+ /*
+ * We avoid submitting virtual requests into
+ * the secondary ports so that we can migrate
+ * the request immediately to another engine
+ * rather than wait for the primary request.
+ */
+ if (rq->execution_mask != engine->mask)
+ goto done;
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
+ goto done;
+
+ merge = false;
+ }
+
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port++ = i915_request_get(last);
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->context,
+ rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
+
+ submit = true;
+ last = rq;
+ }
+ }
+
+ rb_erase_cached(&p->node, &sched_engine->queue);
+ i915_priolist_free(p);
+ }
+done:
+ *port++ = i915_request_get(last);
+
+ /*
+ * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+ *
+ * We choose the priority hint such that if we add a request of greater
+ * priority than this, we kick the submission tasklet to decide on
+ * the right order of submitting the requests to hardware. We must
+ * also be prepared to reorder requests as they are in-flight on the
+ * HW. We derive the priority hint then as the first "hole" in
+ * the HW submission ports and if there are no available slots,
+ * the priority of the lowest executing request, i.e. last.
+ *
+ * When we do receive a higher priority request ready to run from the
+ * user, see queue_request(), the priority hint is bumped to that
+ * request triggering preemption on the next dequeue (or subsequent
+ * interrupt for secondary ports).
+ */
+ sched_engine->queue_priority_hint = queue_prio(sched_engine);
+ i915_sched_engine_reset_on_empty(sched_engine);
+ spin_unlock(&sched_engine->lock);
+
+ /*
+ * We can skip poking the HW if we ended up with exactly the same set
+ * of requests as currently running, e.g. trying to timeslice a pair
+ * of ordered contexts.
+ */
+ if (submit &&
+ memcmp(active,
+ execlists->pending,
+ (port - execlists->pending) * sizeof(*port))) {
+ *port = NULL;
+ while (port-- != execlists->pending)
+ execlists_schedule_in(*port, port - execlists->pending);
+
+ WRITE_ONCE(execlists->yield, -1);
+ set_preempt_timeout(engine, *active);
+ execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
+ while (port-- != execlists->pending)
+ i915_request_put(*port);
+ *execlists->pending = NULL;
+ }
+}
+
+static void execlists_dequeue_irq(struct intel_engine_cs *engine)
+{
+ local_irq_disable(); /* Suspend interrupts across request submission */
+ execlists_dequeue(engine);
+ local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+}
+
+static void clear_ports(struct i915_request **ports, int count)
+{
+ memset_p((void **)ports, NULL, count);
+}
+
+static void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
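+/*
+ * Drain both the pending[] and inflight[] ELSP ports into the caller's
+ * inactive list so each request can be scheduled out, leaving the port
+ * arrays cleared and execlists->active pointing at the empty inflight[].
+ */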
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+ struct i915_request **inactive)
+{
+ struct i915_request * const *port;
+
+ for (port = execlists->pending; *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+
+ /* Mark the end of active before we overwrite *active */
+ for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ *inactive++ = *port;
+ clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+
+ smp_wmb(); /* complete the seqlock for execlists_active() */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* Having cancelled all outstanding process_csb(), stop their timers */
+ GEM_BUG_ON(execlists->pending[0]);
+ cancel_timer(&execlists->timer);
+ cancel_timer(&execlists->preempt);
+
+ return inactive;
+}
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ *
+ * Xe_HP csb shuffles things around compared to TGL:
+ *
+ * bits 0-3: context switch detail (same possible values as TGL)
+ * bits 4-9: engine instance
+ * bits 10-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bit 32: semaphore wait mode (poll or signal), Only valid when
+ * switch detail is set to "wait on semaphore"
+ * bit 33: switched to new queue
+ * bits 34-41: wait detail (for switch detail 1 to 4)
+ * bits 42-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static inline bool
+__gen12_csb_parse(bool ctx_to_valid, bool ctx_away_valid, bool new_queue,
+ u8 switch_detail)
+{
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!ctx_to_valid);
+ return true;
+ }
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(switch_detail);
+ return false;
+}
+
+static bool xehp_csb_parse(const u64 csb)
+{
+ return __gen12_csb_parse(XEHP_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */
+ XEHP_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */
+ upper_32_bits(csb) & XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
+ GEN12_CTX_SWITCH_DETAIL(lower_32_bits(csb)));
+}
+
+static bool gen12_csb_parse(const u64 csb)
+{
+ return __gen12_csb_parse(GEN12_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */
+ GEN12_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */
+ lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
+ GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+}
+
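+/*
+ * Before Gen12, a CSB event promotes pending[] into active[] when it
+ * reports either an idle->active transition or a preemption.
+ */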
+static bool gen8_csb_parse(const u64 csb)
+{
+ return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
+}
+
+static noinline u64
+wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry;
+
+ /*
+ * Reading from the HWSP has one particular advantage: we can detect
+ * a stale entry. Since the write into HWSP is broken, we have no reason
+ * to trust the HW at all; the mmio entry may equally be unordered, so
+ * we prefer the path that is self-checking and as a last resort,
+ * return the mmio value.
+ *
+ * tgl,dg1:HSDES#22011327657
+ */
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
+ int idx = csb - engine->execlists.csb_status;
+ int status;
+
+ status = GEN8_EXECLISTS_STATUS_BUF;
+ if (idx >= 6) {
+ status = GEN11_EXECLISTS_STATUS_BUF2;
+ idx -= 6;
+ }
+ status += sizeof(u64) * idx;
+
+ entry = intel_uncore_read64(engine->uncore,
+ _MMIO(engine->mmio_base + status));
+ }
+ preempt_enable();
+
+ return entry;
+}
+
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+{
+ u64 entry = READ_ONCE(*csb);
+
+ /*
+ * Unfortunately, the GPU does not always serialise its write
+ * of the CSB entries before its write of the CSB pointer, at least
+ * from the perspective of the CPU, using what is known as a Global
+ * Observation Point. We may read a new CSB tail pointer, but then
+ * read the stale CSB entries, causing us to misinterpret the
+ * context-switch events, and eventually declare the GPU hung.
+ *
+ * icl:HSDES#1806554093
+ * tgl:HSDES#22011248461
+ */
+ if (unlikely(entry == -1))
+ entry = wa_csb_read(engine, csb);
+
+ /* Consume this entry so that we can spot its future reuse. */
+ WRITE_ONCE(*csb, -1);
+
+ /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
+ return entry;
+}
+
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+ /* By cancelling, we will start afresh in start_timeslice() */
+ cancel_timer(&el->timer);
+}
+
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ u64 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
+ struct i915_request **prev;
+ u8 head, tail;
+
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that only happens inside the reset paths and so is serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
+ !reset_in_progress(engine));
+
+ /*
+ * Note that csb_write, csb_status may be either in HWSP or mmio.
+ * When reading from the csb_write mmio register, we have to be
+ * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+ * the low 4bits. As it happens we know the next 4bits are always
+ * zero and so we can simply mask off the low u8 of the register
+ * and treat it identically to reading from the HWSP (without having
+ * to use explicit shifting and masking, and probably bifurcating
+ * the code to handle the legacy mmio read).
+ */
+ head = execlists->csb_head;
+ tail = READ_ONCE(*execlists->csb_write);
+ if (unlikely(head == tail))
+ return inactive;
+
+ /*
+ * We will consume all events from HW, or at least pretend to.
+ *
+ * The sequence of events from the HW is deterministic, and derived
+ * from our writes to the ELSP, with a smidgen of variability for
+ * the arrival of the asynchronous requests wrt the inflight
+ * execution. If the HW sends an event that does not correspond with
+ * the one we are expecting, we have to abandon all hope as we lose
+ * all tracking of what the engine is actually executing. We will
+ * only detect we are out of sequence with the HW when we get an
+ * 'impossible' event because we have already drained our own
+ * preemption/promotion queue. If this occurs, we know that we likely
+ * lost track of execution earlier and must unwind and restart. The
+ * simplest way is to stop processing the event queue and force the
+ * engine to reset.
+ */
+ execlists->csb_head = tail;
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+ /*
+ * Hopefully paired with a wmb() in HW!
+ *
+ * We must complete the read of the write pointer before any reads
+ * from the CSB, so that we do not see stale values. Without an rmb
+ * (lfence) the HW may speculatively perform the CSB[] reads *before*
+ * we perform the READ_ONCE(*csb_write).
+ */
+ rmb();
+
+ /* Remember who was last running under the timer */
+ prev = inactive;
+ *prev = NULL;
+
+ do {
+ bool promote;
+ u64 csb;
+
+ if (++head == num_entries)
+ head = 0;
+
+ /*
+ * We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
+ csb = csb_read(engine, buf + head);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, upper_32_bits(csb), lower_32_bits(csb));
+
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ promote = xehp_csb_parse(csb);
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ promote = gen12_csb_parse(csb);
+ else
+ promote = gen8_csb_parse(csb);
+ if (promote) {
+ struct i915_request * const *old = execlists->active;
+
+ if (GEM_WARN_ON(!*execlists->pending)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ ring_set_paused(engine, 0);
+
+ /* Point active to the new ELSP; prevent overwriting */
+ WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
+
+ /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", old);
+ while (*old)
+ *inactive++ = *old++;
+
+ /* switch pending to inflight */
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
+
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
+ } else {
+ if (GEM_WARN_ON(!*execlists->active)) {
+ execlists->error_interrupt |= ERROR_CSB;
+ break;
+ }
+
+ /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
+
+ /*
+ * We rely on the hardware being strongly
+ * ordered, that the breadcrumb write is
+ * coherent (visible from the CPU) before the
+ * user interrupt is processed. One might assume
+ * that the breadcrumb write being before the
+ * user interrupt and the CS event for the context
+ * switch would therefore be before the CS event
+ * itself...
+ */
+ if (GEM_SHOW_DEBUG() &&
+ !__i915_request_is_complete(*execlists->active)) {
+ struct i915_request *rq = *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "context completed before request!\n");
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+ }
+
+ *inactive++ = *execlists->active++;
+
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ }
+ } while (head != tail);
+
+ /*
+ * Gen11 has proven to fail wrt global observation point between
+ * entry and tail update, failing on the ordering and thus
+ * we see an old entry in the context status buffer.
+ *
+ * Forcibly evict the entries ahead of the next gpu csb update, to
+ * increase the odds that we get fresh entries even from non-working
+ * hardware. The cost of doing so comes out mostly in the wash, as
+ * hardware, working or not, will need to do the invalidation
+ * beforehand anyway.
+ */
+ drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0]));
+
+ /*
+ * We assume that any event reflects a change in context flow
+ * and merits a fresh timeslice. We reinstall the timer after
+ * inspecting the queue to see if we need to resubmit.
+ */
+ if (*prev != *execlists->active) { /* elide lite-restores */
+ struct intel_context *prev_ce = NULL, *active_ce = NULL;
+
+ /*
+ * Note the inherent discrepancy between the HW runtime,
+ * recorded as part of the context switch, and the CPU
+ * adjustment for active contexts. We have to hope that
+ * the delay in processing the CS event is very small
+ * and consistent. It works to our advantage to have
+ * the CPU adjustment _undershoot_ (i.e. start later than)
+ * the CS timestamp so we never overreport the runtime
+ * and correct ourselves later when updating from HW.
+ */
+ if (*prev)
+ prev_ce = (*prev)->context;
+ if (*execlists->active)
+ active_ce = (*execlists->active)->context;
+ if (prev_ce != active_ce) {
+ if (prev_ce)
+ lrc_runtime_stop(prev_ce);
+ if (active_ce)
+ lrc_runtime_start(active_ce);
+ }
+ new_timeslice(execlists);
+ }
+
+ return inactive;
+}
+
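+/*
+ * Schedule out every request that process_csb() collected as being no
+ * longer in flight on the HW ports.
+ */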
+static void post_process_csb(struct i915_request **port,
+ struct i915_request **last)
+{
+ while (port != last)
+ execlists_schedule_out(*port++);
+}
+
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link,
+ &rq->engine->sched_engine->hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (__i915_request_is_complete(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ if (i915_request_on_hold(rq))
+ return false;
+
+ spin_lock_irq(&engine->sched_engine->lock);
+
+ if (__i915_request_is_complete(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->sched_engine->hold));
+
+unlock:
+ spin_unlock_irq(&engine->sched_engine->lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine->sched_engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->sched_engine->lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
+ engine->sched_engine->queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+ }
+
+ spin_unlock_irq(&engine->sched_engine->lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
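+/*
+ * Called from softirq context while delaying the forced preemption, so
+ * all allocations here must be atomic; if any fail we skip the capture
+ * rather than stall the reset.
+ */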
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ cap->error->gt->engine->hung = true;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+ const struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request * const *port, *rq;
+
+ /*
+ * Use the most recent result from process_csb(), but just in case
+ * we trigger an error (via interrupt) before the first CS event has
+ * been written, peek at the next submission.
+ */
+
+ for (port = el->active; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at active:%zd\n",
+ ccid, port - el->active);
+ return rq;
+ }
+ }
+
+ for (port = el->pending; (rq = *port); port++) {
+ if (rq->context->lrc.ccid == ccid) {
+ ENGINE_TRACE(engine,
+ "ccid:%x found at pending:%zd\n",
+ ccid, port - el->pending);
+ return rq;
+ }
+ }
+
+ ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ return NULL;
+}
+
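+/* Read the CCID of the context currently reported by the HW (EXECLIST_STATUS upper dword) */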
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+ return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return;
+
+ spin_lock_irq(&engine->sched_engine->lock);
+ cap->rq = active_context(engine, active_ccid(engine));
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->sched_engine->lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->sched_engine->tasklet);
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ execlists_capture(engine);
+ intel_engine_reset(engine, msg);
+
+ tasklet_enable(&engine->sched_engine->tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
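+/*
+ * The forced preemption only counts as timed out if the preemption is
+ * still outstanding, i.e. pending[] has not yet been promoted by a CS
+ * event when the timer expires.
+ */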
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(struct tasklet_struct *t)
+{
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct intel_engine_cs * const engine = sched_engine->private_data;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = process_csb(engine, post);
+ GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+ if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
+ cancel_timer(&engine->execlists.preempt);
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
+ }
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ const char *msg;
+
+ /* Generate the error message in priority order wrt the user! */
+ if (engine->execlists.error_interrupt & GENMASK(15, 0))
+ msg = "CS error"; /* thrown by a user payload */
+ else if (engine->execlists.error_interrupt & ERROR_CSB)
+ msg = "invalid CSB event";
+ else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+ msg = "preemption time out";
+ else
+ msg = "internal error";
+
+ engine->execlists.error_interrupt = 0;
+ execlists_reset(engine, msg);
+ }
+
+ if (!engine->execlists.pending[0]) {
+ execlists_dequeue_irq(engine);
+ start_timeslice(engine);
+ }
+
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ bool tasklet = false;
+
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ /* Upper 16b are the enabling mask, rsvd for internal errors */
+ eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(engine);
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+}
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
+}
+
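+/*
+ * Add the request to the priolist for its effective priority and mark
+ * it as residing in the priority queue.
+ */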
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine->sched_engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
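+/*
+ * Only kick the submission tasklet if the new request is of higher
+ * priority than anything tracked by the queue_priority_hint; otherwise
+ * it will be picked up on the next natural dequeue.
+ */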
+static bool submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ struct i915_sched_engine *sched_engine = engine->sched_engine;
+
+ if (rq_prio(rq) <= sched_engine->queue_priority_hint)
+ return false;
+
+ sched_engine->queue_priority_hint = rq_prio(rq);
+ return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link,
+ &engine->sched_engine->hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
+
+ GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
+ GEM_BUG_ON(list_empty(&request->sched.link));
+
+ if (submit_queue(engine, request))
+ __execlists_kick(&engine->execlists);
+ }
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+static int
+__execlists_context_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
+{
+ int err;
+
+ err = lrc_pre_pin(ce, engine, ww, vaddr);
+ if (err)
+ return err;
+
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
+ lrc_init_state(ce, engine, *vaddr);
+
+ __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
+ }
+
+ return 0;
+}
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+ return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
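+/*
+ * If the request is currently active on an engine, force a preemption
+ * with a heartbeat pulse; if the pulse fails, escalate to an engine
+ * reset.
+ */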
+static void execlists_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = NULL;
+
+ i915_request_active_engine(rq, &engine);
+
+ if (engine && intel_engine_pulse(engine))
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "request cancellation by %s",
+ current->comm);
+}
+
+static struct intel_context *
+execlists_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_context *parent = NULL, *ce, *err;
+ int i;
+
+ GEM_BUG_ON(num_siblings != 1);
+
+ for (i = 0; i < width; ++i) {
+ ce = intel_context_create(engines[i]);
+ if (IS_ERR(ce)) {
+ err = ce;
+ goto unwind;
+ }
+
+ if (i == 0)
+ parent = ce;
+ else
+ intel_context_bind_parent_child(parent, ce);
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ intel_context_set_nopreempt(parent);
+ for_each_child(parent, ce)
+ intel_context_set_nopreempt(ce);
+
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ return err;
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
+
+ .alloc = execlists_context_alloc,
+
+ .cancel_request = execlists_context_cancel_request,
+
+ .pre_pin = execlists_context_pre_pin,
+ .pin = execlists_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = lrc_reset,
+ .destroy = lrc_destroy,
+
+ .create_parallel = execlists_create_parallel,
+ .create_virtual = execlists_create_virtual,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ if (!i915_vm_is_4lvl(request->context->vm)) {
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+ }
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ ring_set_paused(engine, 0);
+
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ drm_clflush_virt_range(execlists->csb_status,
+ execlists->csb_size *
+ sizeof(execlists->csb_status));
+
+ /* Once more for luck and our trusty paranoia */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ 0xffff << 16 | reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+ GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(execlists_active(&engine->execlists));
+
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) that the contents of our pinned buffers has been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ reset_csb_pointers(engine);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
+}
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ drm_err(&engine->i915->drm,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error, parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ if (GRAPHICS_VER(engine->i915) >= 11)
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ else
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+ enable_error_interrupt(engine);
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+ intel_mocs_init_engine(engine);
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ enable_execlists(engine);
+
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ xehp_enable_ccs_engines(engine);
+
+ return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&engine->sched_engine->tasklet.count));
+
+ /*
+ * Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its execlists->tasklet *just* as we are
+ * calling engine->resume() and also writing the ELSP.
+ * Turning off the execlists->tasklet until the reset is over
+ * prevents the race.
+ */
+ __tasklet_disable_sync_once(&engine->sched_engine->tasklet);
+ GEM_BUG_ON(!reset_in_progress(engine));
+
+ /*
+ * We stop engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Even a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus we assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ring_set_paused(engine, 1);
+ intel_engine_stop_cs(engine);
+
+ /*
+ * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * to wait for any pending mi force wakeups
+ */
+ if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ intel_engine_wait_for_pending_mi_fw(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+}
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ drm_clflush_virt_range(execlists->csb_write,
+ sizeof(execlists->csb_write[0]));
+
+ inactive = process_csb(engine, inactive); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(engine);
+
+ return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 head;
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ rq = active_context(engine, engine->execlists.reset_ccid);
+ if (!rq)
+ return;
+
+ ce = rq->context;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ if (__i915_request_is_complete(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ head = intel_ring_wrap(ce->ring, rq->tail);
+ goto out_replay;
+ }
+
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+ rq = active_request(ce->timeline, rq);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
+
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!__i915_request_has_started(rq))
+ goto out_replay;
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ __i915_request_reset(rq, stalled);
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+out_replay:
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ head, ce->ring->tail);
+ lrc_reset_regs(ce, engine);
+ ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
+
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct i915_request **inactive;
+
+ rcu_read_lock();
+ inactive = reset_csb(engine, post);
+
+ execlists_reset_active(engine, true);
+
+ inactive = cancel_port_requests(execlists, inactive);
+ post_process_csb(post, inactive);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /* Process the csb, find the guilty context and throw away */
+ execlists_reset_csb(engine, stalled);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+ rcu_read_unlock();
+}
+
+static void nop_submission_tasklet(struct tasklet_struct *t)
+{
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct intel_engine_cs * const engine = sched_engine->private_data;
+
+ /* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
+}
+
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched_engine * const sched_engine = engine->sched_engine;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ ENGINE_TRACE(engine, "\n");
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ execlists_reset_csb(engine, true);
+
+ rcu_read_lock();
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+
+ priolist_for_each_request_consume(rq, rn, p) {
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
+ }
+
+ rb_erase_cached(&p->node, &sched_engine->queue);
+ i915_priolist_free(p);
+ }
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &sched_engine->hold, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.sched_engine->lock);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ if (i915_request_mark_eio(rq)) {
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
+ i915_request_put(rq);
+
+ ve->base.sched_engine->queue_priority_hint = INT_MIN;
+ }
+ spin_unlock(&ve->base.sched_engine->lock);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ sched_engine->queue_priority_hint = INT_MIN;
+ sched_engine->queue = RB_ROOT_CACHED;
+
+ GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
+ engine->sched_engine->tasklet.callback = nop_submission_tasklet;
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+ rcu_read_unlock();
+}
+
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ /*
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
+ *
+ * If the GPU reset fails, the engine may still be alive with requests
+ * inflight. We expect those to complete, or for the device to be
+ * reset as the next level of recovery, and as a final resort we
+ * will declare the device wedged.
+ */
+ GEM_BUG_ON(!reset_in_progress(engine));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&engine->sched_engine->tasklet))
+ __execlists_kick(execlists);
+
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&engine->sched_engine->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
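+/* On parking, stop the timeslice and forced-preemption timers */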
+static void execlists_park(struct intel_engine_cs *engine)
+{
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
+}
+
+static void add_to_engine(struct i915_request *rq)
+{
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+ list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void remove_from_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->sched_engine->lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->sched_engine->lock);
+ spin_lock(&engine->sched_engine->lock);
+ locked = engine;
+ }
+ list_del_init(&rq->sched.link);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+
+ /* Prevent further __await_execution() registering a cb, then flush */
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+ spin_unlock_irq(&locked->sched_engine->lock);
+
+ i915_request_notify_execute_cb_imm(rq);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_sched_engine *sched_engine = engine->sched_engine;
+ const struct i915_request *inflight;
+
+ /*
+ * We only need to kick the tasklet once for the high priority
+ * new context we add into the queue.
+ */
+ if (prio <= sched_engine->queue_priority_hint)
+ return;
+
+ rcu_read_lock();
+
+ /* Nothing currently active? We're overdue for a submission! */
+ inflight = execlists_active(&engine->execlists);
+ if (!inflight)
+ goto unlock;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (inflight->context == rq->context)
+ goto unlock;
+
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
+
+ sched_engine->queue_priority_hint = prio;
+
+ /*
+	 * Allow preemption of low -> normal -> high, but do not
+	 * allow low priority tasks to preempt other low priority
+	 * tasks, on the assumption that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so keep it simple (KISS).
+ */
+ if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+ tasklet_hi_schedule(&sched_engine->tasklet);
+
+unlock:
+ rcu_read_unlock();
+}
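+
+/*
+ * Worked example for kick_execlists() (illustrative only, with
+ * I915_PRIORITY_NORMAL as the zero baseline): a new request at prio 0
+ * schedules the tasklet when the inflight context runs at a negative,
+ * below-normal priority, whereas a below-normal request never forces a
+ * preemption cycle against another below-normal request; its latency is
+ * traded away in favour of background throughput.
+ */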
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = execlists_submit_request;
+ engine->sched_engine->schedule = i915_schedule;
+ engine->sched_engine->kick_backend = kick_execlists;
+ engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->sched_engine->tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ execlists_shutdown(engine);
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ ktime_t total = stats->total;
+
+ /*
+ * If the engine is executing something at the moment
+ * add it to the total.
+ */
+ *now = ktime_get();
+ if (READ_ONCE(stats->active))
+ total = ktime_add(total, ktime_sub(*now, stats->start));
+
+ return total;
+}
+
+static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
+ ktime_t *now)
+{
+ struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&stats->lock);
+ total = __execlists_engine_busyness(engine, now);
+ } while (read_seqcount_retry(&stats->lock, seq));
+
+ return total;
+}
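+
+/*
+ * The seqcount retry loop above hands out a tear-free snapshot of the
+ * accumulated busy time without taking the engine lock. A hypothetical
+ * consumer (sketch only; "interval_ms" and "busy_pct" are local to this
+ * example) could derive utilisation by sampling twice over a known
+ * wall-clock interval, roughly what the i915 PMU does on top of
+ * intel_engine_get_busy_time():
+ *
+ *	ktime_t t0, t1, b0, b1;
+ *	u64 busy_pct;
+ *
+ *	b0 = intel_engine_get_busy_time(engine, &t0);
+ *	msleep(interval_ms);
+ *	b1 = intel_engine_get_busy_time(engine, &t1);
+ *	busy_pct = div64_u64(100 * ktime_to_ns(ktime_sub(b1, b0)),
+ *			     ktime_to_ns(ktime_sub(t1, t0)));
+ */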
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = execlists_resume;
+
+ engine->cops = &execlists_context_ops;
+ engine->request_alloc = execlists_request_alloc;
+ engine->add_active_request = add_to_engine;
+ engine->remove_active_request = remove_from_engine;
+
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = execlists_set_default_submission;
+
+ if (GRAPHICS_VER(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+		 * TODO: On Gen11 the interrupt masks need to be clear
+		 * to allow C6 entry. Keep interrupts enabled at all
+		 * times and take the hit of generating extra interrupts
+		 * until a more refined solution exists.
+ */
+ }
+ intel_engine_set_irq_handler(engine, execlists_irq_handler);
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (CONFIG_DRM_I915_TIMESLICE_DURATION)
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen125_emit_bb_start;
+ else
+ engine->emit_bb_start = gen125_emit_bb_start_noarb;
+ } else {
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+ }
+
+ engine->busyness = execlists_engine_busyness;
+}
+
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
+{
+ unsigned int shift = 0;
+
+ if (GRAPHICS_VER(engine->i915) < 11) {
+ const u8 irq_shifts[] = {
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
+ };
+
+ shift = irq_shifts[engine->id];
+ }
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
+}
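+
+/*
+ * Note on the shifts above: before Gen11 several engines share the GT
+ * interrupt registers, each engine owning a 16-bit slice selected by its
+ * GEN8_*_IRQ_SHIFT (e.g. BCS0 and VCS1 sit in the upper halves of their
+ * respective registers). From Gen11 onwards the interrupt registers are
+ * banked per engine, so the masks are used unshifted.
+ */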
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (GRAPHICS_VER(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
+ tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+
+ seqcount_init(&engine->stats.execlists.lock);
+
+ if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+
+ engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore,
+ RING_EXECLIST_CONTROL(engine->mmio_base),
+ FW_REG_WRITE);
+ } else {
+ execlists->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
+ execlists->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ execlists->csb_write =
+ &engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)];
+
+ if (GRAPHICS_VER(i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
+ execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = execlists_sanitize;
+ engine->release = execlists_release;
+
+ return 0;
+}
+
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.sched_engine->default_priolist.requests;
+}
+
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
+{
+ struct virtual_engine *ve =
+ container_of(wrk, typeof(*ve), rcu.work);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->context.inflight);
+
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.sched_engine->lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!__i915_request_is_complete(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.sched_engine->lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees as in the case it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.sched_engine->tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->sched_engine->lock);
+
+ /* Detachment is lazily performed in the sched_engine->tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->sched_engine->lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+ lrc_fini(&ve->context);
+ intel_context_fini(&ve->context);
+
+ if (ve->base.breadcrumbs)
+ intel_breadcrumbs_put(ve->base.breadcrumbs);
+ if (ve->base.sched_engine)
+ i915_sched_engine_put(ve->base.sched_engine);
+ intel_engine_free_request_pool(&ve->base);
+
+ kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+	 * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
+}
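+
+/*
+ * prandom_u32_max(n) returns a value in [0, n), so a result of 0 keeps
+ * the existing ordering and no swap is performed; any other value
+ * exchanges that sibling with sibling[0], randomising which physical
+ * engine is inspected first across different virtual engines.
+ */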
+
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_alloc(ce, ve->siblings[0]);
+}
+
+static int virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ /* Note: we must use a real engine class for setting up reg state */
+ return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
+}
+
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return lrc_pin(ce, ve->siblings[0], vaddr);
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ intel_timeline_exit(ce->timeline);
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static struct intel_engine_cs *
+virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+
+ if (sibling >= ve->num_siblings)
+ return NULL;
+
+ return ve->siblings[sibling];
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
+
+ .alloc = virtual_context_alloc,
+
+ .cancel_request = execlists_context_cancel_request,
+
+ .pre_pin = virtual_context_pre_pin,
+ .pin = virtual_context_pin,
+ .unpin = lrc_unpin,
+ .post_unpin = lrc_post_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+
+ .get_sibling = virtual_get_sibling,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_set_error_once(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.sched_engine->queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(struct tasklet_struct *t)
+{
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct virtual_engine * const ve =
+ (struct virtual_engine *)sched_engine->private_data;
+ const int prio = READ_ONCE(sched_engine->queue_priority_hint);
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (!READ_ONCE(ve->request))
+ break; /* already handled by a sibling's tasklet */
+
+ spin_lock_irq(&sibling->sched_engine->lock);
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ }
+
+ goto unlock_engine;
+ }
+
+ if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->sched_engine->queue_priority_hint)
+ tasklet_hi_schedule(&sibling->sched_engine->tasklet);
+
+unlock_engine:
+ spin_unlock_irq(&sibling->sched_engine->lock);
+
+ if (intel_context_inflight(&ve->context))
+ break;
+ }
+}
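+
+/*
+ * The per-sibling rbtree walked above keeps the ve_nodes sorted by
+ * descending priority (higher prio descends left), so rb_first_cached()
+ * on sibling->execlists.virtual always yields the most urgent pending
+ * virtual request. The sibling tasklet is only kicked when the node was
+ * inserted leftmost and beats the sibling's queue_priority_hint, i.e.
+ * when it could actually change what that engine submits next.
+ */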
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ unsigned long flags;
+
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
+
+ /* By the time we resubmit a request, it may be completed */
+ if (__i915_request_is_complete(rq)) {
+ __i915_request_submit(rq);
+ goto unlock;
+ }
+
+ if (ve->request) { /* background completion from preempt-to-busy */
+ GEM_BUG_ON(!__i915_request_is_complete(ve->request));
+ __i915_request_submit(ve->request);
+ i915_request_put(ve->request);
+ }
+
+ ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
+
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+ tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
+
+unlock:
+ spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
+}
+
+static struct intel_context *
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need for this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
+ if (!ve->base.sched_engine) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+ ve->base.sched_engine->private_data = &ve->base;
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.sched_engine->schedule = i915_schedule;
+ ve->base.sched_engine->kick_backend = kick_execlists;
+ ve->base.submit_request = virtual_submit_request;
+
+ INIT_LIST_HEAD(virtual_queue(ve));
+ tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
+
+ intel_context_init(&ve->context, &ve->base);
+
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+		 * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->sched_engine->tasklet.callback !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.add_active_request = sibling->add_active_request;
+ ve->base.remove_active_request = sibling->remove_active_request;
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
+ }
+
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ virtual_engine_initial_hint(ve);
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_sched_engine *sched_engine = engine->sched_engine;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &sched_engine->requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ if (sched_engine->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ READ_ONCE(sched_engine->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+
+ priolist_for_each_request(rq, p) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+ drm_printf(m, "\tOn hold?: %lu\n",
+ list_count(&engine->sched_engine->hold));
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_execlists.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
new file mode 100644
index 000000000..d2c7d45ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
+#define __INTEL_EXECLISTS_SUBMISSION_H__
+
+#include <linux/llist.h>
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
+struct intel_gt;
+
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
+};
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
+#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
new file mode 100644
index 000000000..a6d0463b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -0,0 +1,1330 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+#include <linux/types.h>
+#include <linux/stop_machine.h>
+
+#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "intel_ggtt_gmch.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_pci_config.h"
+#include "i915_drv.h"
+#include "i915_pci.h"
+#include "i915_scatterlist.h"
+#include "i915_utils.h"
+#include "i915_vgpu.h"
+
+#include "intel_gtt.h"
+#include "gen8_ppgtt.h"
+
+static inline bool suspend_retains_ptes(struct i915_address_space *vm)
+{
+ return GRAPHICS_VER(vm->i915) >= 8 &&
+ !HAS_LMEM(vm->i915) &&
+ vm->is_ggtt;
+}
+
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+ if (i915_node_color_differs(node, color))
+ *start += I915_GTT_PAGE_SIZE;
+
+ /*
+ * Also leave a space between the unallocated reserved node after the
+ * GTT and any objects within the GTT, i.e. we use the color adjustment
+ * to insert a guard page to prevent prefetches crossing over the
+ * GTT boundary.
+ */
+ node = list_next_entry(node, node_list);
+ if (node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
+}
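+
+/*
+ * The colouring callback above implements the GGTT guard page: when the
+ * neighbouring node's colour differs, the usable hole is shrunk by one
+ * I915_GTT_PAGE_SIZE at that end, leaving a scratch-backed PTE between
+ * objects so that command streamer prefetch running past the end of a
+ * batch cannot wander into the next object.
+ */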
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ ggtt->vm.is_ggtt = true;
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
+
+ intel_ggtt_init_fences(ggtt);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @i915: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ /*
+ * Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(to_gt(i915)->ggtt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Return the value of the last GGTT pte cast to a u64, if
+ * the system is supposed to retain ptes across resume. 0 otherwise.
+ */
+static u64 read_last_pte(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *ptep;
+
+ if (!suspend_retains_ptes(vm))
+ return 0;
+
+ GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
+ ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
+ return readq(ptep);
+}
+
+/**
+ * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to suspend the mappings for
+ *
+ * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ */
+void i915_ggtt_suspend_vm(struct i915_address_space *vm)
+{
+ struct i915_vma *vma, *vn;
+ int save_skip_rewrite;
+
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+retry:
+ i915_gem_drain_freed_objects(vm->i915);
+
+ mutex_lock(&vm->mutex);
+
+ /*
+ * Skip rewriting PTE on VMA unbind.
+ * FIXME: Use an argument to i915_vma_unbind() instead?
+ */
+ save_skip_rewrite = vm->skip_pte_rewrite;
+ vm->skip_pte_rewrite = true;
+
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ continue;
+
+		/* unlikely to race when the GPU is idle, so no worry about the slowpath... */
+ if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
+ /*
+ * No dead objects should appear here, GPU should be
+ * completely idle, and userspace suspended
+ */
+ i915_gem_object_get(obj);
+
+ mutex_unlock(&vm->mutex);
+
+ i915_gem_object_lock(obj, NULL);
+ GEM_WARN_ON(i915_vma_unbind(vma));
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ vm->skip_pte_rewrite = save_skip_rewrite;
+ goto retry;
+ }
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ i915_vma_wait_for_bind(vma);
+
+ __i915_vma_evict(vma, false);
+ drm_mm_remove_node(&vma->node);
+ }
+
+ i915_gem_object_unlock(obj);
+ }
+
+ if (!suspend_retains_ptes(vm))
+ vm->clear_range(vm, 0, vm->total);
+ else
+ i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
+
+ vm->skip_pte_rewrite = save_skip_rewrite;
+
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+{
+ i915_ggtt_suspend_vm(&ggtt->vm);
+ ggtt->invalidate(ggtt);
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+}
+
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+ spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
+ */
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+
+ gen8_ggtt_invalidate(ggtt);
+
+ if (GRAPHICS_VER(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
+
+ if (flags & PTE_LM)
+ pte |= GEN12_GGTT_PTE_LM;
+
+ return pte;
+}
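+
+/*
+ * Illustration (values assume the current bit definitions): a Gen8+ GGTT
+ * PTE is simply the 4KiB-aligned DMA address with the present bit set,
+ * e.g. a system-memory page at 0x12340000 encodes as 0x12340001, with
+ * GEN12_GGTT_PTE_LM additionally set for pages in device local memory.
+ */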
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma_resource *vma_res;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma_res, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0]->encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
+ return;
+
+ vma_res->bound_flags |= flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+ if (vma_res->bi.lmem)
+ pte_flags |= PTE_LM;
+
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+}
+
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+}
+
+/*
+ * Reserve the top of the GuC address space for firmware images. Addresses
+ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
+ * which makes for a suitable range to hold GuC/HuC firmware images if the
+ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
+ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
+ * of the same size anyway, which is far more than needed, to keep the logic
+ * in uc_fw_ggtt_offset() simple.
+ */
+#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
+
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 offset;
+ int ret;
+
+ if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
+ offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
+ GUC_TOP_RESERVE_SIZE, offset,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
+ if (ret)
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
+
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
+{
+ /*
+ * Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ unsigned long hole_start, hole_end;
+ struct drm_mm_node *entry;
+ int ret;
+
+ ggtt->pte_lost = true;
+
+ /*
+ * GuC requires all resources that we're sharing with it to be placed in
+ * non-WOPCM memory. If GuC is not present or not in use we still need a
+ * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+ * why.
+ */
+ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
+
+ ret = intel_vgt_balloon(ggtt);
+ if (ret)
+ return ret;
+
+ mutex_init(&ggtt->error_mutex);
+ if (ggtt->mappable_end) {
+ /*
+ * Reserve a mappable slot for our lockless error capture.
+ *
+ * We strongly prefer taking address 0x0 in order to protect
+ * other critical buffers against accidental overwrites,
+ * as writing to address 0 is a very common mistake.
+ *
+ * Since 0 may already be in use by the system (e.g. the BIOS
+ * framebuffer), we let the reservation fail quietly and hope
+ * 0 remains reserved always.
+ *
+ * If we fail to reserve 0, and then fail to find any space
+ * for an error-capture, remain silent. We can afford not
+ * to reserve an error_capture node as we have fallback
+ * paths, and we trust that 0 will remain reserved. However,
+ * the only likely reason for failure to insert is a driver
+ * bug, which we expect to cause other failures...
+ */
+ ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+ ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+ if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+ drm_mm_insert_node_in_range(&ggtt->vm.mm,
+ &ggtt->error_capture,
+ ggtt->error_capture.size, 0,
+ ggtt->error_capture.color,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ }
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+ ggtt->error_capture.start,
+ ggtt->error_capture.start + ggtt->error_capture.size);
+
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible by GuC. Reserve this range within
+ * GGTT as it can comfortably hold GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err;
+
+ /* Clear any non-preallocated blocks */
+ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
+ drm_dbg(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
+ ggtt->vm.clear_range(&ggtt->vm, hole_start,
+ hole_end - hole_start);
+ }
+
+ /* And finally clear the reserved guard page */
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
+
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ /* Currently applicable only to VLV */
+ pte_flags = 0;
+ if (vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+
+ if (flags & I915_VMA_LOCAL_BIND)
+ ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
+ stash, vma_res, cache_level, flags);
+
+ if (flags & I915_VMA_GLOBAL_BIND)
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+
+ vma_res->bound_flags |= flags;
+}
+
+static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+
+ if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
+ ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
+}
+
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_vm_pt_stash stash = {};
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
+ err = -ENODEV;
+ goto err_ppgtt;
+ }
+
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
+
+ i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
+ i915_gem_object_unlock(ppgtt->vm.scratch[0]);
+ if (err)
+ goto err_stash;
+
+ /*
+ * Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
+
+ ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
+ ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
+
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
+ ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+ return 0;
+
+err_stash:
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+err_ppgtt:
+ i915_vm_put(&ppgtt->vm);
+ return err;
+}
+
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
+ if (!ppgtt)
+ return;
+
+ i915_vm_put(&ppgtt->vm);
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(to_gt(i915)->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
+ if (ret)
+ cleanup_init_ggtt(to_gt(i915)->ggtt);
+ }
+
+ return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma, *vn;
+
+ flush_workqueue(ggtt->vm.i915->wq);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ ggtt->vm.skip_pte_rewrite = true;
+
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool trylock;
+
+ trylock = i915_gem_object_trylock(obj, NULL);
+ WARN_ON(!trylock);
+
+ WARN_ON(__i915_vma_unbind(vma));
+ if (trylock)
+ i915_gem_object_unlock(obj);
+ }
+
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
+ mutex_destroy(&ggtt->error_mutex);
+
+ ggtt_release_guc_top(ggtt);
+ intel_vgt_deballoon(ggtt);
+
+ ggtt->vm.cleanup(&ggtt->vm);
+
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
+
+ arch_phys_wc_del(ggtt->mtrr);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ fini_aliasing_ppgtt(ggtt);
+
+ intel_ggtt_fini_fences(ggtt);
+ ggtt_cleanup_hw(ggtt);
+}
+
+/**
+ * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
+ * all free objects have been drained.
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
+ dma_resv_fini(&ggtt->vm._resv);
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
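+
+/*
+ * Worked example: a raw GGMS field of 3 decodes to 1 << 3 = 8, i.e.
+ * 8MiB of PTE space; at 8 bytes per gen8 PTE that is 1M entries, or a
+ * 4GiB GGTT once multiplied by I915_GTT_PAGE_SIZE in gen8_gmch_probe().
+ * The CONFIG_X86_32 clamp to 4MiB correspondingly caps the GGTT at 2GiB.
+ */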
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
+
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ phys_addr_t phys_addr;
+ u32 pte_flags;
+ int ret;
+
+ GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+ /*
+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ kref_init(&ggtt->vm.resv_ref);
+ ret = setup_scratch_page(&ggtt->vm);
+ if (ret) {
+ drm_err(&i915->drm, "Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ free_scratch(vm);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ if (!HAS_LMEM(i915)) {
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+
+ /*
+		 * Calling the stop_machine() version of the GGTT update
+		 * functions from the error capture/reset path will raise a
+		 * lockdep warning. Allow calling gen8_ggtt_insert_* directly
+		 * at reset time, which is safe from parallel GGTT updates.
+ */
+ ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
+
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64 << 20) ||
+ ggtt->mappable_end > (512 << 20)) {
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (GRAPHICS_VER(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ int ret;
+
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = i915->drm.dev;
+ dma_resv_init(&ggtt->vm._resv);
+
+ if (GRAPHICS_VER(i915) >= 8)
+ ret = gen8_gmch_probe(ggtt);
+ else if (GRAPHICS_VER(i915) >= 6)
+ ret = gen6_gmch_probe(ggtt);
+ else
+ ret = intel_ggtt_gmch_probe(ggtt);
+
+ if (ret) {
+ dma_resv_fini(&ggtt->vm._resv);
+ return ret;
+ }
+
+ if ((ggtt->vm.total - 1) >> 32) {
+ drm_err(&i915->drm,
+ "We never expected a Global GTT with more than 32bits"
+ " of address space! Found %lldM!\n",
+ ggtt->vm.total >> 20);
+ ggtt->vm.total = 1ULL << 32;
+ ggtt->mappable_end =
+ min_t(u64, ggtt->mappable_end, ggtt->vm.total);
+ }
+
+ if (ggtt->mappable_end > ggtt->vm.total) {
+ drm_err(&i915->drm,
+ "mappable aperture extends past end of GGTT,"
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->vm.total);
+ ggtt->mappable_end = ggtt->vm.total;
+ }
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+ drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+ (u64)ggtt->mappable_end >> 20);
+ drm_dbg(&i915->drm, "DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+ return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
+ if (ret)
+ return ret;
+
+ if (i915_vtd_active(i915))
+ drm_info(&i915->drm, "VT-d active for gfx access\n");
+
+ return 0;
+}
+
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+{
+ if (GRAPHICS_VER(i915) < 6)
+ return intel_ggtt_gmch_enable_hw(i915);
+
+ return 0;
+}
+
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
+{
+ GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+
+ ggtt->invalidate = guc_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+{
+ /* XXX Temporary pardon for error unload */
+ if (ggtt->invalidate == gen8_ggtt_invalidate)
+ return;
+
+ /* We should only be called after i915_ggtt_enable_guc() */
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->invalidate(ggtt);
+}
+
+/**
+ * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to restore the mappings for
+ *
+ * Restore the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ *
+ * Returns %true if the mapping was restored for any object that was in a write
+ * domain before suspend.
+ */
+bool i915_ggtt_resume_vm(struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ bool write_domain_objs = false;
+ bool retained_ptes;
+
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+ /*
+ * First fill our portion of the GTT with scratch pages if
+ * they were not retained across suspend.
+ */
+ retained_ptes = suspend_retains_ptes(vm) &&
+ !i915_vm_to_ggtt(vm)->pte_lost &&
+ !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
+
+ if (!retained_ptes)
+ vm->clear_range(vm, 0, vm->total);
+
+ /* clflush objects bound into the GGTT and rebind them. */
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ unsigned int was_bound =
+ atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
+
+ GEM_BUG_ON(!was_bound);
+ if (!retained_ptes) {
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
+ vma->ops->bind_vma(vm, NULL, vma->resource,
+ obj ? obj->cache_level : 0,
+ was_bound);
+ }
+ if (obj) { /* only used during resume => exclusive access */
+ write_domain_objs |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ }
+ }
+
+ return write_domain_objs;
+}
+
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
+{
+ bool flush;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ flush = i915_ggtt_resume_vm(&ggtt->vm);
+
+ ggtt->invalidate(ggtt);
+
+ if (flush)
+ wbinvd_on_all_cpus();
+
+ if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ intel_ggtt_restore_fences(ggtt);
+}
+
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
+{
+ to_gt(i915)->ggtt->pte_lost = val;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
new file mode 100644
index 000000000..ea775e601
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
+#include "i915_vgpu.h"
+#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for cpu access. Also note that some code wants an unfenced view, for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
+
+#define pipelined 0
+
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.gt->uncore;
+}
+
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
+{
+ i915_reg_t fence_reg_lo, fence_reg_hi;
+ int fence_pitch_shift;
+ u64 val;
+
+ if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
+ fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
+ fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+
+ } else {
+ fence_reg_lo = FENCE_REG_965_LO(fence->id);
+ fence_reg_hi = FENCE_REG_965_HI(fence->id);
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
+ val = 0;
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+
+ GEM_BUG_ON(!IS_ALIGNED(stride, 128));
+
+ val = fence->start + fence->size - I965_FENCE_PAGE;
+ val <<= 32;
+ val |= fence->start;
+ val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
+ if (fence->tiling == I915_TILING_Y)
+ val |= BIT(I965_FENCE_TILING_Y_SHIFT);
+ val |= I965_FENCE_REG_VALID;
+ }
+
+ if (!pipelined) {
+ struct intel_uncore *uncore = fence_to_uncore(fence);
+
+ /*
+ * To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with a write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ intel_uncore_write_fw(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
+
+ intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
+ intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
+ }
+}
+
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
+{
+ u32 val;
+
+ val = 0;
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+ unsigned int tiling = fence->tiling;
+ bool is_y_tiled = tiling == I915_TILING_Y;
+
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
+ stride /= 128;
+ else
+ stride /= 512;
+ GEM_BUG_ON(!is_power_of_2(stride));
+
+ val = fence->start;
+ if (is_y_tiled)
+ val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+ val |= I915_FENCE_SIZE_BITS(fence->size);
+ val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
+
+ val |= I830_FENCE_REG_VALID;
+ }
+
+ if (!pipelined) {
+ struct intel_uncore *uncore = fence_to_uncore(fence);
+ i915_reg_t reg = FENCE_REG(fence->id);
+
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
+ }
+}
+
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
+{
+ u32 val;
+
+ val = 0;
+ if (fence->tiling) {
+ unsigned int stride = fence->stride;
+
+ val = fence->start;
+ if (fence->tiling == I915_TILING_Y)
+ val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+ val |= I830_FENCE_SIZE_BITS(fence->size);
+ val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ }
+
+ if (!pipelined) {
+ struct intel_uncore *uncore = fence_to_uncore(fence);
+ i915_reg_t reg = FENCE_REG(fence->id);
+
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
+ }
+}
+
+static void fence_write(struct i915_fence_reg *fence)
+{
+ struct drm_i915_private *i915 = fence_to_i915(fence);
+
+ /*
+ * Previous access through the fence register is marshalled by
+ * the mb() inside the fault handlers (i915_gem_release_mmaps)
+ * and explicitly managed for internal users.
+ */
+
+ if (GRAPHICS_VER(i915) == 2)
+ i830_write_fence_reg(fence);
+ else if (GRAPHICS_VER(i915) == 3)
+ i915_write_fence_reg(fence);
+ else
+ i965_write_fence_reg(fence);
+
+ /*
+ * Access through the fenced region afterwards is
+ * ordered by the posting reads whilst writing the registers.
+ */
+}
+
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+ return GRAPHICS_VER(fence_to_i915(fence)) < 4;
+}
+
+static int fence_update(struct i915_fence_reg *fence,
+ struct i915_vma *vma)
+{
+ struct i915_ggtt *ggtt = fence->ggtt;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
+ intel_wakeref_t wakeref;
+ struct i915_vma *old;
+ int ret;
+
+ fence->tiling = 0;
+ if (vma) {
+ GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj));
+
+ if (!i915_vma_is_map_and_fenceable(vma))
+ return -EINVAL;
+
+ if (gpu_uses_fence_registers(fence)) {
+ /* implicit 'unfenced' GPU blits */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ }
+
+ fence->start = vma->node.start;
+ fence->size = vma->fence_size;
+ fence->stride = i915_gem_object_get_stride(vma->obj);
+ fence->tiling = i915_gem_object_get_tiling(vma->obj);
+ }
+ WRITE_ONCE(fence->dirty, false);
+
+ old = xchg(&fence->vma, NULL);
+ if (old) {
+ /* XXX Ideally we would move the waiting to outside the mutex */
+ ret = i915_active_wait(&fence->active);
+ if (ret) {
+ fence->vma = old;
+ return ret;
+ }
+
+ i915_vma_flush_writes(old);
+
+ /*
+ * Ensure that all userspace CPU access is completed before
+ * stealing the fence.
+ */
+ if (old != vma) {
+ GEM_BUG_ON(old->fence != fence);
+ i915_vma_revoke_mmap(old);
+ old->fence = NULL;
+ }
+
+ list_move(&fence->link, &ggtt->fence_list);
+ }
+
+ /*
+ * We only need to update the register itself if the device is awake.
+ * If the device is currently powered down, we will defer the write
+ * to the runtime resume, see intel_ggtt_restore_fences().
+ *
+ * This only works for removing the fence register, on acquisition
+ * the caller must hold the rpm wakeref. The fence register must
+ * be cleared before we can use any other fences to ensure that
+ * the new fences do not overlap the elided clears, confusing HW.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref) {
+ GEM_BUG_ON(vma);
+ return 0;
+ }
+
+ WRITE_ONCE(fence->vma, vma);
+ fence_write(fence);
+
+ if (vma) {
+ vma->fence = fence;
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ }
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+ return 0;
+}
+
+/**
+ * i915_vma_revoke_fence - force-remove fence for a VMA
+ * @vma: vma to map linearly (not through a fence reg)
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ */
+void i915_vma_revoke_fence(struct i915_vma *vma)
+{
+ struct i915_fence_reg *fence = vma->fence;
+ intel_wakeref_t wakeref;
+
+ lockdep_assert_held(&vma->vm->mutex);
+ if (!fence)
+ return;
+
+ GEM_BUG_ON(fence->vma != vma);
+ GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+
+ fence->tiling = 0;
+ WRITE_ONCE(fence->vma, NULL);
+ vma->fence = NULL;
+
+ /*
+ * Skip the write to HW if and only if the device is currently
+ * suspended.
+ *
+ * If the driver does not currently hold a wakeref (if_in_use == 0),
+ * the device may currently be runtime suspended, or it may be woken
+ * up before the suspend takes place. If the device is not suspended
+ * (powered down) and we skip clearing the fence register, the HW is
+ * left in an undefined state where we may end up with multiple
+ * registers overlapping.
+ */
+ with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
+ fence_write(fence);
+}
+
+static bool fence_is_active(const struct i915_fence_reg *fence)
+{
+ return fence->vma && i915_vma_is_active(fence->vma);
+}
+
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
+{
+ struct i915_fence_reg *active = NULL;
+ struct i915_fence_reg *fence, *fn;
+
+ list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
+ GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
+ if (fence == active) /* now seen this fence twice */
+ active = ERR_PTR(-EAGAIN);
+
+ /* Prefer idle fences so we do not have to wait on the GPU */
+ if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
+ if (!active)
+ active = fence;
+
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ continue;
+ }
+
+ if (atomic_read(&fence->pin_count))
+ continue;
+
+ return fence;
+ }
+
+ /* Wait for completion of pending flips which consume fences */
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-ENOBUFS);
+}
+
+int __i915_vma_pin_fence(struct i915_vma *vma)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_fence_reg *fence;
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (vma->fence) {
+ fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ atomic_inc(&fence->pin_count);
+ if (!fence->dirty) {
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ return 0;
+ }
+ } else if (set) {
+ fence = fence_find(ggtt);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ atomic_inc(&fence->pin_count);
+ } else {
+ return 0;
+ }
+
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
+/**
+ * i915_vma_pin_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int i915_vma_pin_fence(struct i915_vma *vma)
+{
+ int err;
+
+ if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
+ return 0;
+
+ /*
+ * Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+
+ return err;
+}
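A hedged sketch of the expected caller pattern around i915_vma_pin_fence(): the vma is assumed to already be pinned in the mappable GGTT with an rpm wakeref held, i915_vma_unpin_fence() is assumed to be the matching release helper from i915_vma.h, and the use_fenced_access() wrapper itself is hypothetical.

static int use_fenced_access(struct i915_vma *vma)
{
        int err;

        /* Walks/steals a fence reg and programs it for this vma, if tiled. */
        err = i915_vma_pin_fence(vma);
        if (err)
                return err;

        if (vma->fence) {
                /* detiled CPU access through the mappable aperture goes here */
        }

        /* Drop the pin so the fence can be stolen again once idle. */
        i915_vma_unpin_fence(vma);
        return 0;
}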
+
+/**
+ * i915_reserve_fence - Reserve a fence for vGPU
+ * @ggtt: Global GTT
+ *
+ * This function walks the fence regs looking for a free one and removes
+ * it from the fence_list. It is used to reserve a fence for vGPU to use.
+ */
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
+{
+ struct i915_fence_reg *fence;
+ int count;
+ int ret;
+
+ lockdep_assert_held(&ggtt->vm.mutex);
+
+ /* Keep at least one fence available for the display engine. */
+ count = 0;
+ list_for_each_entry(fence, &ggtt->fence_list, link)
+ count += !atomic_read(&fence->pin_count);
+ if (count <= 1)
+ return ERR_PTR(-ENOSPC);
+
+ fence = fence_find(ggtt);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (fence->vma) {
+ /* Force-remove fence from VMA */
+ ret = fence_update(fence, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ list_del(&fence->link);
+
+ return fence;
+}
+
+/**
+ * i915_unreserve_fence - Reclaim a reserved fence
+ * @fence: the fence reg
+ *
+ * This function adds a fence register previously reserved for vGPU back to the fence_list.
+ */
+void i915_unreserve_fence(struct i915_fence_reg *fence)
+{
+ struct i915_ggtt *ggtt = fence->ggtt;
+
+ lockdep_assert_held(&ggtt->vm.mutex);
+
+ list_add(&fence->link, &ggtt->fence_list);
+}
+
+/**
+ * intel_ggtt_restore_fences - restore fence state
+ * @ggtt: Global GTT
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
+ */
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
+{
+ int i;
+
+ for (i = 0; i < ggtt->num_fences; i++)
+ fence_write(&ggtt->fence_regs[i]);
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
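As a standalone illustration of what the modes above mean in practice, a small editor's sketch of the bit-6 XOR a CPU-side copy would apply for the 9/10 and 9/10/17 swizzles; this follows the conventional interpretation of those mode names and is not code from this file.

#include <stdint.h>
#include <stdio.h>

/* Conventional meaning of the swizzle modes named above (editor's sketch). */
enum swizzle_mode { SWIZZLE_NONE, SWIZZLE_9_10, SWIZZLE_9_10_17 };

static uint64_t swizzle_addr(uint64_t addr, enum swizzle_mode mode)
{
        uint64_t parity = 0;

        switch (mode) {
        case SWIZZLE_9_10_17:
                parity ^= (addr >> 17) & 1;
                /* fallthrough: the bit 9/10 terms apply as well */
        case SWIZZLE_9_10:
                parity ^= (addr >> 9) & 1;
                parity ^= (addr >> 10) & 1;
                break;
        case SWIZZLE_NONE:
                break;
        }

        return addr ^ (parity << 6);    /* XOR the accumulated parity into bit 6 */
}

int main(void)
{
        /* Bit 9 set: with 9_10 swizzling the CPU must flip bit 6 of its access. */
        printf("0x%llx\n", (unsigned long long)swizzle_addr(0x200, SWIZZLE_9_10));
        return 0;
}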
+
+/**
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @ggtt: Global GGTT
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
+ /*
+ * On BDW+, swizzling is not used. We leave the CPU memory
+ * controller in charge of optimizing memory accesses without
+ * the extra address manipulation on the GPU side.
+ *
+ * VLV and CHV don't have GPU swizzling.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ if (i915->preserve_bios_swizzle) {
+ if (intel_uncore_read(uncore, DISP_ARB_CTL) &
+ DISP_TILE_SURFACE_SWIZZLING) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else {
+ u32 dimm_c0, dimm_c1;
+
+ dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
+ dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /*
+ * Enable swizzling when the channels are populated
+ * with identically sized dimms. We don't need to check
+ * the 3rd channel because no cpu with gpu attached
+ * ships in that configuration. Also, swizzling only
+ * makes sense for 2 channels anyway.
+ */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ } else if (GRAPHICS_VER(i915) == 5) {
+ /*
+ * On Ironlake, whatever the DRAM config, the GPU always uses the
+ * same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (GRAPHICS_VER(i915) == 2) {
+ /*
+ * As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
+ /*
+ * The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
+ */
+ if (intel_uncore_read16(uncore, C0DRB3_BW) ==
+ intel_uncore_read16(uncore, C1DRB3_BW)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else {
+ u32 dcc = intel_uncore_read(uncore, DCC);
+
+ /*
+ * On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /*
+ * This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Bit 11 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+
+ /* check for L-shaped memory aka modified enhanced addressing */
+ if (GRAPHICS_VER(i915) == 4 &&
+ !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+
+ if (dcc == 0xffffffff) {
+ drm_err(&i915->drm, "Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ }
+
+ if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ /*
+ * Userspace likes to explode if it sees unknown swizzling,
+ * so lie. We will finish the lie when reporting through
+ * the get-tiling-ioctl by reporting the physical swizzle
+ * mode as unknown instead.
+ *
+ * As we don't strictly know what the swizzling is, it may be
+ * bit17 dependent, and so we need to also prevent the pages
+ * from being moved.
+ */
+ i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
+ to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
+ to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
+}
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void swizzle_page(struct page *page)
+{
+ char temp[64];
+ char *vaddr;
+ int i;
+
+ vaddr = kmap(page);
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+ kunmap(page);
+}
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ int i;
+
+ if (obj->bit_17 == NULL)
+ return;
+
+ i = 0;
+ for_each_sgt_page(page, sgt_iter, pages) {
+ char new_bit_17 = page_to_phys(page) >> 17;
+
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
+ swizzle_page(page);
+ set_page_dirty(page);
+ }
+
+ i++;
+ }
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ int i;
+
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
+ }
+
+ i = 0;
+
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (page_to_phys(page) & (1 << 17))
+ __set_bit(i, obj->bit_17);
+ else
+ __clear_bit(i, obj->bit_17);
+ i++;
+ }
+}
+
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ int num_fences;
+ int i;
+
+ INIT_LIST_HEAD(&ggtt->fence_list);
+ INIT_LIST_HEAD(&ggtt->userfault_list);
+
+ detect_bit_6_swizzle(ggtt);
+
+ if (!i915_ggtt_has_aperture(ggtt))
+ num_fences = 0;
+ else if (GRAPHICS_VER(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ num_fences = 32;
+ else if (GRAPHICS_VER(i915) >= 4 ||
+ IS_I945G(i915) || IS_I945GM(i915) ||
+ IS_G33(i915) || IS_PINEVIEW(i915))
+ num_fences = 16;
+ else
+ num_fences = 8;
+
+ if (intel_vgpu_active(i915))
+ num_fences = intel_uncore_read(uncore,
+ vgtif_reg(avail_rs.fence_num));
+ ggtt->fence_regs = kcalloc(num_fences,
+ sizeof(*ggtt->fence_regs),
+ GFP_KERNEL);
+ if (!ggtt->fence_regs)
+ num_fences = 0;
+
+ /* Initialize fence registers to zero */
+ for (i = 0; i < num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ i915_active_init(&fence->active, NULL, NULL, 0);
+ fence->ggtt = ggtt;
+ fence->id = i;
+ list_add_tail(&fence->link, &ggtt->fence_list);
+ }
+ ggtt->num_fences = num_fences;
+
+ intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+ int i;
+
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ i915_active_fini(&fence->active);
+ }
+
+ kfree(ggtt->fence_regs);
+}
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ if (GRAPHICS_VER(i915) < 5 ||
+ to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
+
+ if (GRAPHICS_VER(i915) == 5)
+ return;
+
+ intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
+
+ if (GRAPHICS_VER(i915) == 6)
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (GRAPHICS_VER(i915) == 7)
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (GRAPHICS_VER(i915) == 8)
+ intel_uncore_write(uncore,
+ GAMTARBMODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ else
+ MISSING_CASE(GRAPHICS_VER(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
new file mode 100644
index 000000000..25340be5e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "i915_active.h"
+
+struct drm_i915_gem_object;
+struct i915_ggtt;
+struct i915_vma;
+struct intel_gt;
+struct sg_table;
+
+#define I965_FENCE_PAGE 4096UL
+
+struct i915_fence_reg {
+ struct list_head link;
+ struct i915_ggtt *ggtt;
+ struct i915_vma *vma;
+ atomic_t pin_count;
+ struct i915_active active;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
+ u32 start;
+ u32 size;
+ u32 tiling;
+ u32 stride;
+};
+
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
+void i915_unreserve_fence(struct i915_fence_reg *fence);
+
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);
+
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
+
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
new file mode 100644
index 000000000..4e2163a1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "intel_ggtt_gmch.h"
+
+#include <drm/intel-gtt.h>
+#include <drm/i915_drm.h>
+
+#include <linux/agp_backend.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_gtt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt.h"
+
+static void gmch_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gmch_gtt_flush();
+}
+
+static void gmch_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static void gmch_ggtt_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before unmapping anything from
+ * the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (!i915_vtd_active(i915))
+ return false;
+
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
+ return true;
+
+ return false;
+}
+
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ if (!ret) {
+ drm_err(&i915->drm, "failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
+ ggtt->vm.insert_page = gmch_ggtt_insert_page;
+ ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
+ ggtt->vm.clear_range = gmch_ggtt_clear_range;
+ ggtt->vm.cleanup = gmch_ggtt_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ if (unlikely(ggtt->do_idle_maps))
+ drm_notice(&i915->drm,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
+{
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
+void intel_ggtt_gmch_flush(void)
+{
+ intel_gmch_gtt_flush();
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
new file mode 100644
index 000000000..370bf321b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GGTT_GMCH_H__
+#define __INTEL_GGTT_GMCH_H__
+
+#include "intel_gtt.h"
+
+/* For x86 platforms */
+#if IS_ENABLED(CONFIG_X86)
+
+void intel_ggtt_gmch_flush(void);
+int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915);
+int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt);
+
+/* Stubs for non-x86 platforms */
+#else
+
+static inline void intel_ggtt_gmch_flush(void) { }
+static inline int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915) { return -ENODEV; }
+static inline int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt) { return -ENODEV; }
+
+#endif
+
+#endif /* __INTEL_GGTT_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
new file mode 100644
index 000000000..25ea5f8a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2003-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_GPU_COMMANDS_H_
+#define _INTEL_GPU_COMMANDS_H_
+
+#include <linux/bitops.h>
+
+/*
+ * Target address alignments required for GPU access e.g.
+ * MI_STORE_DWORD_IMM.
+ */
+#define alignof_dword 4
+#define alignof_qword 8
+
+/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT 29
+#define INSTR_MI_CLIENT 0x0
+#define INSTR_BC_CLIENT 0x2
+#define INSTR_RC_CLIENT 0x3
+#define INSTR_SUBCLIENT_SHIFT 27
+#define INSTR_SUBCLIENT_MASK 0x18000000
+#define INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK 0x7000000
+#define INSTR_26_TO_24_SHIFT 24
+
+#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) \
+ (__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define MI_GLOBAL_GTT (1<<22)
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_SET_PREDICATE_DISABLE (0 << 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_SET_APPID MI_INSTR(0x0e, 0)
+#define MI_SET_APPID_SESSION_ID(x) ((x) << 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
+#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define MI_SEMAPHORE_SYNC_MASK (3<<16)
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
+#define MI_RESTORE_INHIBIT (1<<0)
+#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
+#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
+#define MI_SEMAPHORE_REGISTER_POLL (1 << 16)
+#define MI_SEMAPHORE_POLL (1 << 15)
+#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
+#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
+#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
+#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
+#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
+#define MI_SEMAPHORE_TOKEN_SHIFT 5
+#define MI_STORE_DATA_IMM MI_INSTR(0x20, 0)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
+#define MI_STORE_QWORD_IMM_GEN8 (MI_INSTR(0x20, 3) | REG_BIT(21))
+#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
+#define MI_USE_GGTT (1 << 22) /* g4x+ */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_ATOMIC MI_INSTR(0x2f, 1)
+#define MI_ATOMIC_INLINE (MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA)
+#define MI_ATOMIC_GLOBAL_GTT (1 << 22)
+#define MI_ATOMIC_INLINE_DATA (1 << 18)
+#define MI_ATOMIC_CS_STALL (1 << 17)
+#define MI_ATOMIC_MOVE (0x4 << 8)
+
+/*
+ * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrary many arbitrary registers: Simply issue x
+ * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
+#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
+#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
+#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
+#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
+#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_CCS (1<<16)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_LLC (1<<9)
+#define MI_FLUSH_DW_NOTIFY (1<<8)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
+#define MI_LRR_SOURCE_CS_MMIO REG_BIT(18)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
+#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
+#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
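To make the MI_LOAD_REGISTER_IMM note further above concrete, a hedged sketch of emitting a single register load with the macros from this header; emit_one_lri() and its arguments are placeholders, and real callers would pass the mmio offset of an i915_reg_t rather than a raw u32.

/* Editor's sketch: one register write, honouring the MI_NOOP note above. */
static u32 *emit_one_lri(u32 *cs, u32 reg_offset, u32 value)
{
        *cs++ = MI_NOOP;                    /* guard against the hw ignoring the LRI */
        *cs++ = MI_LOAD_REGISTER_IMM(1);    /* one address/value pair follows */
        *cs++ = reg_offset;
        *cs++ = value;

        return cs;
}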
+
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+
+#define XY_CTRL_SURF_INSTR_SIZE 5
+#define MI_FLUSH_DW_SIZE 3
+#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3)
+#define SRC_ACCESS_TYPE_SHIFT 21
+#define DST_ACCESS_TYPE_SHIFT 20
+#define CCS_SIZE_MASK 0x3FF
+#define CCS_SIZE_SHIFT 8
+#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 25)
+#define NUM_CCS_BYTES_PER_BLOCK 256
+#define NUM_BYTES_PER_CCS_BYTE 256
+#define NUM_CCS_BLKS_PER_XFER 1024
+#define INDIRECT_ACCESS 0
+#define DIRECT_ACCESS 1
+
+#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
+#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
+#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22)
+#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19)
+#define XY_FAST_COLOR_BLT_DW 16
+#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
+#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
+
+#define XY_FAST_COPY_BLT_D0_SRC_TILING_MASK REG_GENMASK(21, 20)
+#define XY_FAST_COPY_BLT_D0_DST_TILING_MASK REG_GENMASK(14, 13)
+#define XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_SRC_TILING_MASK, mode)
+#define XY_FAST_COPY_BLT_D0_DST_TILE_MODE(mode) \
+ REG_FIELD_PREP(XY_FAST_COPY_BLT_D0_DST_TILING_MASK, mode)
+#define LINEAR 0
+#define TILE_X 0x1
+#define XMAJOR 0x1
+#define YMAJOR 0x2
+#define TILE_64 0x3
+#define XY_FAST_COPY_BLT_D1_SRC_TILE4 REG_BIT(31)
+#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+/* Note: MOCS value = (index << 1) */
+#define BLIT_CCTL_SRC_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (idx) << 1)
+#define BLIT_CCTL_DST_MOCS(idx) \
+ REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (idx) << 1)
+
+#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
+#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
+#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
+#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5)
+#define BLT_WRITE_A (2<<20)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_SRC_COPY (0xcc<<16)
+#define BLT_ROP_COLOR_COPY (0xf0<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */
+#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */
+#define PIPE_CONTROL_FLUSH_L3 (1<<27)
+#define PIPE_CONTROL_AMFS_FLUSH (1<<25) /* gen12+ */
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_MMIO_WRITE (1<<23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET (1<<19)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_PSD_SYNC (1<<17) /* gen11+ */
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
+#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+/*
+ * 3D-related flags that can't be set on _engines_ that lack access to the 3D
+ * pipeline (i.e., CCS engines).
+ */
+#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
+ PIPE_CONTROL_TILE_CACHE_FLUSH | \
+ PIPE_CONTROL_DEPTH_STALL | \
+ PIPE_CONTROL_STALL_AT_SCOREBOARD | \
+ PIPE_CONTROL_PSD_SYNC | \
+ PIPE_CONTROL_AMFS_FLUSH | \
+ PIPE_CONTROL_VF_CACHE_INVALIDATE | \
+ PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)
+
+/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
+#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
+ PIPE_CONTROL_3D_ENGINE_FLAGS | \
+ PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
+ PIPE_CONTROL_FLUSH_ENABLE | \
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
+ PIPE_CONTROL_DC_FLUSH_ENABLE)
+
+#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
+#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
+/* Opcodes for MI_MATH_INSTR */
+#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
+#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
+#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
+#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
+#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
+#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
+#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
+#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
+#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
+#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
+#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
+#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
+/* Registers used as operands in MI_MATH_INSTR */
+#define MI_MATH_REG(x) (x)
+#define MI_MATH_REG_SRCA 0x20
+#define MI_MATH_REG_SRCB 0x21
+#define MI_MATH_REG_ACCU 0x31
+#define MI_MATH_REG_ZF 0x32
+#define MI_MATH_REG_CF 0x33
+
+/*
+ * Media instructions used by the kernel
+ */
+#define MEDIA_INSTR(pipe, op, sub_op, flags) \
+ (__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
+ (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))
+
+#define MFX_WAIT MEDIA_INSTR(1, 0, 0, 0)
+#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
+#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)
+
+#define CRYPTO_KEY_EXCHANGE MEDIA_INSTR(2, 6, 9, 0)
+
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
+#define MI_PREDICATE MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
+#define MI_CLFLUSH MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
+#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
+#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
+#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+ ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define COLOR_BLT ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
+{
+ return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+ return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
+}
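A short worked example of the canonical-form helpers above; sign_extend64() is open-coded here so the sketch stands alone outside the kernel.

#include <stdint.h>
#include <stdio.h>

#define HIGH_BIT 47 /* mirrors GEN8_HIGH_ADDRESS_BIT */

/* Standalone equivalent of sign_extend64(address, 47) as used above. */
static uint64_t canonical(uint64_t addr)
{
        return (uint64_t)(((int64_t)addr << (63 - HIGH_BIT)) >> (63 - HIGH_BIT));
}

int main(void)
{
        /* Bit 47 set: bits 63:48 must replicate it before use in a command. */
        printf("0x%016llx\n", (unsigned long long)canonical(0x0000800000000000ULL));
        /* Bit 47 clear: already canonical, the value is unchanged. */
        printf("0x%016llx\n", (unsigned long long)canonical(0x00007ffffff00000ULL));
        return 0;
}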
+
+static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
+{
+ *cs++ = MI_BATCH_BUFFER_START | flags;
+ *cs++ = addr;
+
+ return cs;
+}
+
+#endif /* _INTEL_GPU_COMMANDS_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
new file mode 100644
index 000000000..7af6db319
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/mei_aux.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "gem/i915_gem_region.h"
+#include "gt/intel_gsc.h"
+#include "gt/intel_gt.h"
+
+#define GSC_BAR_LENGTH 0x00000FFC
+
+static void gsc_irq_mask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static void gsc_irq_unmask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static struct irq_chip gsc_irq_chip = {
+ .name = "gsc_irq_chip",
+ .irq_mask = gsc_irq_mask,
+ .irq_unmask = gsc_irq_unmask,
+};
+
+static int gsc_irq_init(int irq)
+{
+ irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
+ handle_simple_irq, "gsc_irq_handler");
+
+ return irq_set_chip_data(irq, NULL);
+}
+
+static int
+gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_CPU_CLEAR);
+ if (IS_ERR(obj)) {
+ drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
+ goto out_put;
+ }
+
+ intf->gem_obj = obj;
+
+ return 0;
+
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+
+ if (!obj)
+ return;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+
+ i915_gem_object_put(obj);
+}
+
+struct gsc_def {
+ const char *name;
+ unsigned long bar;
+ size_t bar_size;
+ bool use_polling;
+ bool slow_firmware;
+ size_t lmem_size;
+};
+
+/* gsc resources and definitions (HECI1 and HECI2) */
+static const struct gsc_def gsc_def_dg1[] = {
+ {
+ /* HECI1 not yet implemented. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static const struct gsc_def gsc_def_xehpsdv[] = {
+ {
+ /* HECI1 not enabled on the device. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .use_polling = true,
+ .slow_firmware = true,
+ }
+};
+
+static const struct gsc_def gsc_def_dg2[] = {
+ {
+ .name = "mei-gsc",
+ .bar = DG2_GSC_HECI1_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .lmem_size = SZ_4M,
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG2_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static void gsc_release_dev(struct device *dev)
+{
+ struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
+ struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+
+ kfree(adev);
+}
+
+static void gsc_destroy_one(struct drm_i915_private *i915,
+ struct intel_gsc *gsc, unsigned int intf_id)
+{
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
+
+ if (intf->adev) {
+ auxiliary_device_delete(&intf->adev->aux_dev);
+ auxiliary_device_uninit(&intf->adev->aux_dev);
+ intf->adev = NULL;
+ }
+
+ if (intf->irq >= 0)
+ irq_free_desc(intf->irq);
+ intf->irq = -1;
+
+ gsc_ext_om_destroy(intf);
+}
+
+static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
+ unsigned int intf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct mei_aux_device *adev;
+ struct auxiliary_device *aux_dev;
+ const struct gsc_def *def;
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
+ int ret;
+
+ intf->irq = -1;
+ intf->id = intf_id;
+
+ if (intf_id == 0 && !HAS_HECI_PXP(i915))
+ return;
+
+ if (IS_DG1(i915)) {
+ def = &gsc_def_dg1[intf_id];
+ } else if (IS_XEHPSDV(i915)) {
+ def = &gsc_def_xehpsdv[intf_id];
+ } else if (IS_DG2(i915)) {
+ def = &gsc_def_dg2[intf_id];
+ } else {
+ drm_warn_once(&i915->drm, "Unknown platform\n");
+ return;
+ }
+
+ if (!def->name) {
+ drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
+ return;
+ }
+
+ /* skip irq initialization */
+ if (def->use_polling)
+ goto add_device;
+
+ intf->irq = irq_alloc_desc(0);
+ if (intf->irq < 0) {
+ drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
+ goto fail;
+ }
+
+ ret = gsc_irq_init(intf->irq);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
+ goto fail;
+ }
+
+add_device:
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ goto fail;
+
+ if (def->lmem_size) {
+ drm_dbg(&i915->drm, "setting up GSC lmem\n");
+
+ if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
+ drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
+ kfree(adev);
+ goto fail;
+ }
+
+ adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
+ adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
+ }
+
+ adev->irq = intf->irq;
+ adev->bar.parent = &pdev->resource[0];
+ adev->bar.start = def->bar + pdev->resource[0].start;
+ adev->bar.end = adev->bar.start + def->bar_size - 1;
+ adev->bar.flags = IORESOURCE_MEM;
+ adev->bar.desc = IORES_DESC_NONE;
+ adev->slow_firmware = def->slow_firmware;
+
+ aux_dev = &adev->aux_dev;
+ aux_dev->name = def->name;
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = gsc_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
+ kfree(adev);
+ goto fail;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
+ /* adev will be freed with the put_device() and .release sequence */
+ auxiliary_device_uninit(aux_dev);
+ goto fail;
+ }
+ intf->adev = adev;
+
+ return;
+fail:
+ gsc_destroy_one(i915, gsc, intf->id);
+}
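
/*
 * Standalone sketch (illustrative only, made-up numbers and helper name) of
 * how the auxiliary device id above is composed, assuming the usual PCI
 * encoding used by PCI_DEVID() (bus number in bits 15:8, devfn in bits 7:0),
 * with the PCI domain in the upper 16 bits.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_aux_id(uint16_t domain, uint8_t bus, uint8_t devfn)
{
	uint16_t devid = ((uint16_t)bus << 8) | devfn;	/* PCI_DEVID() equivalent */

	return ((uint32_t)domain << 16) | devid;
}

int main(void)
{
	/* domain 0, bus 0x03, device 0, function 0 -> id 0x300 */
	printf("aux id: %#x\n", demo_aux_id(0, 0x03, 0x00));
	return 0;
}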
+
+static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
+{
+ int ret;
+
+ if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
+ return;
+ }
+
+ if (!HAS_HECI_GSC(gt->i915)) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
+ return;
+ }
+
+ if (gt->gsc.intf[intf_id].irq < 0)
+ return;
+
+ ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
+ if (ret)
+ drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
+}
+
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (iir & GSC_IRQ_INTF(0))
+ gsc_irq_handler(gt, 0);
+ if (iir & GSC_IRQ_INTF(1))
+ gsc_irq_handler(gt, 1);
+}
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
+{
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_init_one(i915, gsc, i);
+}
+
+void intel_gsc_fini(struct intel_gsc *gsc)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(gt->i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_destroy_one(gt->i915, gsc, i);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
new file mode 100644
index 000000000..fcac1775e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+#ifndef __INTEL_GSC_DEV_H__
+#define __INTEL_GSC_DEV_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_gt;
+struct mei_aux_device;
+
+#define INTEL_GSC_NUM_INTERFACES 2
+/*
+ * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * The reason for this is to allow growth for more interfaces in the future.
+ */
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
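/*
 * Illustrative note: with this mapping (BIT(n) is 1UL << (n)),
 * GSC_IRQ_INTF(0) == BIT(15) == 0x8000 selects HECI1 and
 * GSC_IRQ_INTF(1) == BIT(14) == 0x4000 selects HECI2, which is how
 * intel_gsc_irq_handler() decodes the IIR bits in intel_gsc.c.
 */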
+
+/**
+ * struct intel_gsc - graphics security controller
+ *
+ * @gem_obj: scratch memory for GSC operations
+ * @intf: gsc interface
+ */
+struct intel_gsc {
+ struct intel_gsc_intf {
+ struct mei_aux_device *adev;
+ struct drm_i915_gem_object *gem_obj;
+ int irq;
+ unsigned int id;
+ } intf[INTEL_GSC_NUM_INTERFACES];
+};
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_fini(struct intel_gsc *gsc);
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
+
+#endif /* __INTEL_GSC_DEV_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
new file mode 100644
index 000000000..91a005c46
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+#include <drm/intel-gtt.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "pxp/intel_pxp.h"
+
+#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_ggtt_gmch.h"
+#include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_regs.h"
+#include "intel_gt_requests.h"
+#include "intel_migrate.h"
+#include "intel_mocs.h"
+#include "intel_pci_config.h"
+#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_renderstate.h"
+#include "intel_rps.h"
+#include "intel_sa_media.h"
+#include "intel_gt_sysfs.h"
+#include "intel_uncore.h"
+#include "shmem_utils.h"
+
+void intel_gt_common_init_early(struct intel_gt *gt)
+{
+ spin_lock_init(gt->irq_lock);
+
+ INIT_LIST_HEAD(&gt->closed_vma);
+ spin_lock_init(&gt->closed_lock);
+
+ init_llist_head(&gt->watchdog.list);
+ INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
+
+ intel_gt_init_buffer_pool(gt);
+ intel_gt_init_reset(gt);
+ intel_gt_init_requests(gt);
+ intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+ intel_gt_pm_init_early(gt);
+
+ intel_uc_init_early(&gt->uc);
+ intel_rps_init_early(&gt->rps);
+}
+
+/* Preliminary initialization of Tile 0 */
+int intel_root_gt_init_early(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt = to_gt(i915);
+
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+ gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+ if (!gt->irq_lock)
+ return -ENOMEM;
+
+ intel_gt_common_init_early(gt);
+
+ return 0;
+}
+
+static int intel_gt_probe_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ unsigned int instance = gt->info.id;
+ int id = INTEL_REGION_LMEM_0 + instance;
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = intel_gt_setup_lmem(gt);
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ if (err == -ENODEV)
+ return 0;
+
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
+ return err;
+ }
+
+ mem->id = id;
+ mem->instance = instance;
+
+ intel_memory_region_set_name(mem, "local%u", mem->instance);
+
+ GEM_BUG_ON(!HAS_REGION(i915, id));
+ GEM_BUG_ON(i915->mm.regions[id]);
+ i915->mm.regions[id] = mem;
+
+ return 0;
+}
+
+int intel_gt_assign_ggtt(struct intel_gt *gt)
+{
+ gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
+
+ return gt->ggtt ? 0 : -ENOMEM;
+}
+
+int intel_gt_init_mmio(struct intel_gt *gt)
+{
+ intel_gt_init_clock_frequency(gt);
+
+ intel_uc_init_mmio(&gt->uc);
+ intel_sseu_info_init(gt);
+ intel_gt_mcr_init(gt);
+
+ return intel_engines_init_mmio(gt);
+}
+
+static void init_unused_ring(struct intel_gt *gt, u32 base)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
+}
+
+static void init_unused_rings(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (GRAPHICS_VER(i915) == 2) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (GRAPHICS_VER(i915) == 3) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
+ }
+}
+
+int intel_gt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ gt->last_init_time = ktime_get();
+
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ HSW_MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(gt);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(gt, "init");
+
+ intel_gt_init_swizzling(gt);
+
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (i.e. head != tail) after resume which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(gt);
+
+ ret = i915_ppgtt_init_hw(gt);
+ if (ret) {
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ goto out;
+ }
+
+ /* We can't enable contexts until all firmware is loaded */
+ ret = intel_uc_init_hw(&gt->uc);
+ if (ret) {
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ goto out;
+ }
+
+ intel_mocs_init(gt);
+
+out:
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ return ret;
+}
+
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+void
+intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 eir;
+
+ if (GRAPHICS_VER(i915) != 2)
+ clear_register(uncore, PGTBL_ER);
+
+ if (GRAPHICS_VER(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ else
+ clear_register(uncore, IPEIR_I965);
+
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (GRAPHICS_VER(i915) >= 12) {
+ rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine_masked(engine, gt, engine_mask, id)
+ gen6_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, gt, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ drm_dbg(&engine->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+ }
+}
+
+static void gen8_check_faults(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
+ u32 fault;
+
+ if (GRAPHICS_VER(gt->i915) >= 12) {
+ fault_reg = GEN12_RING_FAULT_REG;
+ fault_data0_reg = GEN12_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN12_FAULT_TLB_DATA1;
+ } else {
+ fault_reg = GEN8_RING_FAULT_REG;
+ fault_data0_reg = GEN8_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN8_FAULT_TLB_DATA1;
+ }
+
+ fault = intel_uncore_read(uncore, fault_reg);
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
+ fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
+
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (GRAPHICS_VER(i915) >= 8)
+ gen8_check_faults(gt);
+ else if (GRAPHICS_VER(i915) >= 6)
+ gen6_check_faults(gt);
+ else
+ return;
+
+ intel_gt_clear_error_registers(gt, ALL_ENGINES);
+}
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. on LLC
+ * system agents we could not reproduce this behaviour, at least not
+ * until Cannonlake).
+ */
+
+ wmb();
+
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
+ return;
+
+ intel_gt_chipset_flush(gt);
+
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irqrestore(&uncore->lock, flags);
+ }
+}
+
+void intel_gt_chipset_flush(struct intel_gt *gt)
+{
+ wmb();
+ if (GRAPHICS_VER(gt->i915) < 6)
+ intel_ggtt_gmch_flush();
+}
+
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+ intel_gsc_init(&gt->gsc, gt->i915);
+
+ intel_rps_driver_register(&gt->rps);
+
+ intel_gt_debugfs_register(gt);
+ intel_gt_sysfs_register(gt);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_lmem(i915, size,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ gt->scratch = i915_vma_make_unshrinkable(vma);
+
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+ i915_vma_unpin_and_release(&gt->scratch, 0);
+}
+
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+ if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+ return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
+ else
+ return i915_vm_get(&gt->ggtt->vm);
+}
+
+static int __engines_record_defaults(struct intel_gt *gt)
+{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * As we reset the gpu during very early sanitisation, the current
+ * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_renderstate so;
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_renderstate_init(&so, ce);
+ if (err)
+ goto err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fini;
+ }
+
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ err = intel_renderstate_emit(&so, rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+err_fini:
+ intel_renderstate_fini(&so, ce);
+err:
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+ }
+
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct file *state;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ if (rq->fence.error) {
+ err = -EIO;
+ goto out;
+ }
+
+ GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+ if (!rq->context->state)
+ continue;
+
+ /* Keep a copy of the state's backing pages; free the obj */
+ state = shmem_create_from_object(rq->context->state->obj);
+ if (IS_ERR(state)) {
+ err = PTR_ERR(state);
+ goto out;
+ }
+ rq->engine->default_state = state;
+ }
+
+out:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
+ */
+ if (err)
+ intel_gt_set_wedged(gt);
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ ce = rq->context;
+ i915_request_put(rq);
+ intel_context_put(ce);
+ }
+ return err;
+}
+
+static int __engines_verify_workarounds(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ /* Flush and restore the kernel context for safety */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
+ err = -EIO;
+
+ return err;
+}
+
+static void __intel_gt_disable(struct intel_gt *gt)
+{
+ intel_gt_set_wedged_on_fini(gt);
+
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ long remaining_timeout;
+
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
+ &remaining_timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ if (timeout)
+ return timeout;
+
+ if (remaining_timeout < 0)
+ remaining_timeout = 0;
+
+ return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
+}
+
+int intel_gt_init(struct intel_gt *gt)
+{
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENODEV);
+ if (err)
+ return err;
+
+ intel_gt_init_workarounds(gt);
+
+ /*
+ * This is just a security blanket to placate dragons.
+ * On some systems, we very sporadically observe that the first TLBs
+ * used by the CS may be stale, despite us poking the TLB reset. If
+ * we hold the forcewake during initialisation these problems
+ * just magically go away.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ err = intel_gt_init_scratch(gt,
+ GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
+ if (err)
+ goto out_fw;
+
+ intel_gt_pm_init(gt);
+
+ gt->vm = kernel_vm(gt);
+ if (!gt->vm) {
+ err = -ENOMEM;
+ goto err_pm;
+ }
+
+ intel_set_mocs_index(gt);
+
+ err = intel_engines_init(gt);
+ if (err)
+ goto err_engines;
+
+ err = intel_uc_init(&gt->uc);
+ if (err)
+ goto err_engines;
+
+ err = intel_gt_resume(gt);
+ if (err)
+ goto err_uc_init;
+
+ err = intel_gt_init_hwconfig(gt);
+ if (err)
+ drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
+ ERR_PTR(err));
+
+ err = __engines_record_defaults(gt);
+ if (err)
+ goto err_gt;
+
+ err = __engines_verify_workarounds(gt);
+ if (err)
+ goto err_gt;
+
+ err = i915_inject_probe_error(gt->i915, -EIO);
+ if (err)
+ goto err_gt;
+
+ intel_uc_init_late(&gt->uc);
+
+ intel_migrate_init(&gt->migrate, gt);
+
+ intel_pxp_init(&gt->pxp);
+
+ goto out_fw;
+err_gt:
+ __intel_gt_disable(gt);
+ intel_uc_fini_hw(&gt->uc);
+err_uc_init:
+ intel_uc_fini(&gt->uc);
+err_engines:
+ intel_engines_release(gt);
+ i915_vm_put(fetch_and_zero(&gt->vm));
+err_pm:
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+out_fw:
+ if (err)
+ intel_gt_set_wedged_on_init(gt);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ return err;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+ __intel_gt_disable(gt);
+
+ intel_migrate_fini(&gt->migrate);
+ intel_uc_driver_remove(&gt->uc);
+
+ intel_engines_release(gt);
+
+ intel_gt_flush_buffer_pool(gt);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ intel_gt_sysfs_unregister(gt);
+ intel_rps_driver_unregister(&gt->rps);
+ intel_gsc_fini(&gt->gsc);
+
+ intel_pxp_fini(&gt->pxp);
+
+ /*
+ * Upon unregistering the device to prevent any new users, cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ intel_gt_set_wedged_on_fini(gt);
+
+ /* Scrub all HW state upon release */
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ __intel_gt_reset(gt, ALL_ENGINES);
+}
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+ struct i915_address_space *vm;
+
+ vm = fetch_and_zero(&gt->vm);
+ if (vm) /* FIXME being called twice on error paths :( */
+ i915_vm_put(vm);
+
+ intel_wa_list_free(&gt->wa_list);
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+ intel_gt_fini_buffer_pool(gt);
+ intel_gt_fini_hwconfig(gt);
+}
+
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
+ for_each_gt(gt, i915, id) {
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
+ intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
+ intel_engines_free(gt);
+ }
+}
+
+static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
+{
+ int ret;
+
+ if (!gt_is_root(gt)) {
+ struct intel_uncore *uncore;
+ spinlock_t *irq_lock;
+
+ uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+ if (!irq_lock)
+ return -ENOMEM;
+
+ gt->uncore = uncore;
+ gt->irq_lock = irq_lock;
+
+ intel_gt_common_init_early(gt);
+ }
+
+ intel_uncore_init_early(gt->uncore, gt);
+
+ ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ if (ret)
+ return ret;
+
+ gt->phys_addr = phys_addr;
+
+ return 0;
+}
+
+int intel_gt_probe_all(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_gt *gt = &i915->gt0;
+ const struct intel_gt_definition *gtdef;
+ phys_addr_t phys_addr;
+ unsigned int mmio_bar;
+ unsigned int i;
+ int ret;
+
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
+ phys_addr = pci_resource_start(pdev, mmio_bar);
+
+ /*
+ * We always have at least one primary GT on any device
+ * and it has already been initialized early during probe
+ * in i915_driver_probe()
+ */
+ gt->i915 = i915;
+ gt->name = "Primary GT";
+ gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ ret = intel_gt_tile_setup(gt, phys_addr);
+ if (ret)
+ return ret;
+
+ i915->gt[0] = gt;
+
+ if (!HAS_EXTRA_GT_LIST(i915))
+ return 0;
+
+ for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+ gtdef->name != NULL;
+ i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gt->i915 = i915;
+ gt->name = gtdef->name;
+ gt->type = gtdef->type;
+ gt->info.engine_mask = gtdef->engine_mask;
+ gt->info.id = i;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+ gtdef->mapping_base,
+ SZ_16M,
+ pci_resource_len(pdev, mmio_bar)))) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ switch (gtdef->type) {
+ case GT_TILE:
+ ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+ break;
+
+ case GT_MEDIA:
+ ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+ gtdef->gsi_offset);
+ break;
+
+ case GT_PRIMARY:
+ /* Primary GT should not appear in extra GT list */
+ default:
+ MISSING_CASE(gtdef->type);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ goto err;
+
+ i915->gt[i] = gt;
+ }
+
+ return 0;
+
+err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+ return ret;
+}
+
+int intel_gt_tiles_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_gt_probe_lmem(gt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "available engines: %x\n", info->engine_mask);
+
+ intel_sseu_dump(&info->sseu, p);
+}
+
+struct reg_and_bit {
+ i915_reg_t reg;
+ u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+ const i915_reg_t *regs, const unsigned int num)
+{
+ const unsigned int class = engine->class;
+ struct reg_and_bit rb = { };
+
+ if (drm_WARN_ON_ONCE(&engine->i915->drm,
+ class >= num || !regs[class].reg))
+ return rb;
+
+ rb.reg = regs[class];
+ if (gen8 && class == VIDEO_DECODE_CLASS)
+ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+ else
+ rb.bit = engine->instance;
+
+ rb.bit = BIT(rb.bit);
+
+ return rb;
+}
+
+static void mmio_invalidate_full(struct intel_gt *gt)
+{
+ static const i915_reg_t gen8_regs[] = {
+ [RENDER_CLASS] = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
+ [COPY_ENGINE_CLASS] = GEN8_BTCR,
+ };
+ static const i915_reg_t gen12_regs[] = {
+ [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
+ enum intel_engine_id id;
+ const i915_reg_t *regs;
+ unsigned int num = 0;
+
+ if (GRAPHICS_VER(i915) == 12) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return;
+ }
+
+ if (drm_WARN_ONCE(&i915->drm, !num,
+ "Platform does not implement TLB invalidation!"))
+ return;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+
+ awake = 0;
+ for_each_engine(engine, gt, id) {
+ struct reg_and_bit rb;
+
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS))
+ rb.bit = _MASKED_BIT_ENABLE(rb.bit);
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
+ }
+
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
+ spin_unlock_irq(&uncore->lock);
+
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
+ /*
+ * HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+ const unsigned int timeout_us = 100;
+ const unsigned int timeout_ms = 4;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (__intel_wait_for_register_fw(uncore,
+ rb.reg, rb.bit, 0,
+ timeout_us, timeout_ms,
+ NULL))
+ drm_err_ratelimited(&gt->i915->drm,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, timeout_ms);
+ }
+
+ /*
+ * Use delayed put since a) we mostly expect a flurry of TLB
+ * invalidations so it is good to avoid paying the forcewake cost and
+ * b) it works around a bug in Icelake which cannot cope with too rapid
+ * transitions.
+ */
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
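
/*
 * Standalone sketch of the check above (illustrative only; names are made
 * up). ALIGN(seqno, 2) rounds an odd "pending full invalidation" seqno up to
 * the next even value, and the signed subtraction keeps the comparison valid
 * across u32 wraparound.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_seqno_passed(uint32_t cur, uint32_t seqno)
{
	uint32_t aligned = (seqno + 1) & ~1u;	/* ALIGN(seqno, 2) */

	return (int32_t)(cur - aligned) > 0;
}

int main(void)
{
	printf("%d\n", demo_seqno_passed(4, 3));		/* 0: barrier not yet passed */
	printf("%d\n", demo_seqno_passed(6, 3));		/* 1: a full invalidate has passed */
	printf("%d\n", demo_seqno_passed(2, 0xfffffffeu));	/* 1: still correct across wraparound */
	return 0;
}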
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
new file mode 100644
index 000000000..2ee582e28
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT__
+#define __INTEL_GT__
+
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_reset.h"
+
+struct drm_i915_private;
+struct drm_printer;
+
+#define GT_TRACE(gt, fmt, ...) do { \
+ const struct intel_gt *gt__ __maybe_unused = (gt); \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ ##__VA_ARGS__); \
+} while (0)
+
+static inline bool gt_is_root(struct intel_gt *gt)
+{
+ return !gt->info.id;
+}
+
+static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
+{
+ return container_of(uc, struct intel_gt, uc);
+}
+
+static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
+{
+ return container_of(guc, struct intel_gt, uc.guc);
+}
+
+static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
+{
+ return container_of(huc, struct intel_gt, uc.huc);
+}
+
+static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
+{
+ return container_of(gsc, struct intel_gt, gsc);
+}
+
+void intel_gt_common_init_early(struct intel_gt *gt);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
+int intel_gt_assign_ggtt(struct intel_gt *gt);
+int intel_gt_init_mmio(struct intel_gt *gt);
+int __must_check intel_gt_init_hw(struct intel_gt *gt);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt);
+void intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask);
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
+void intel_gt_chipset_flush(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+ enum intel_gt_scratch_field field)
+{
+ return i915_ggtt_offset(gt->scratch) + field;
+}
+
+static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
+{
+ return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
+ test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
+}
+
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+{
+ GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
+ !test_bit(I915_WEDGED, &gt->reset.flags));
+
+ return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
+}
+
+int intel_gt_probe_all(struct drm_i915_private *i915);
+int intel_gt_tiles_init(struct drm_i915_private *i915);
+void intel_gt_release_all(struct drm_i915_private *i915);
+
+#define for_each_gt(gt__, i915__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_MAX_GT; \
+ (id__)++) \
+ for_each_if(((gt__) = (i915__)->gt[(id__)]))
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p);
+
+void intel_gt_watchdog_work(struct work_struct *work);
+
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
+
+#endif /* __INTEL_GT__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
new file mode 100644
index 000000000..cadfd8578
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_buffer_pool.h"
+
+static struct list_head *
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
+{
+ int n;
+
+ /*
+ * Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+ n = fls(sz >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+
+ return &pool->cache_list[n];
+}
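
/*
 * Standalone sketch of the bucketing rule above (illustrative only; assumes
 * 4KiB pages and the four buckets of cache_list[]): objects of 1, 2, 4 and
 * 8-or-more pages map to buckets 0..3 via fls(sz >> PAGE_SHIFT) - 1.
 */
#include <stddef.h>
#include <stdio.h>

static int demo_fls(unsigned long v)	/* 1-based index of highest set bit */
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static int demo_bucket(size_t sz, int nr_buckets)
{
	int n = demo_fls(sz >> 12) - 1;	/* 4KiB pages assumed */

	return n >= nr_buckets ? nr_buckets - 1 : n;
}

int main(void)
{
	size_t sizes[] = { 4096, 8192, 16384, 32768, 65536 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu bytes -> bucket %d\n", sizes[i], demo_bucket(sizes[i], 4));
	/* 4096 -> 0, 8192 -> 1, 16384 -> 2, 32768 -> 3, 65536 -> 3 */
	return 0;
}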
+
+static void node_free(struct intel_gt_buffer_pool_node *node)
+{
+ i915_gem_object_put(node->obj);
+ i915_active_fini(&node->active);
+ kfree_rcu(node, rcu);
+}
+
+static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
+{
+ struct intel_gt_buffer_pool_node *node, *stale = NULL;
+ bool active = false;
+ int n;
+
+ /* Free buffers that have not been used in the past second */
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+
+ if (list_empty(list))
+ continue;
+
+ if (spin_trylock_irq(&pool->lock)) {
+ struct list_head *pos;
+
+ /* Most recent at head; oldest at tail */
+ list_for_each_prev(pos, list) {
+ unsigned long age;
+
+ node = list_entry(pos, typeof(*node), link);
+
+ age = READ_ONCE(node->age);
+ if (!age || jiffies - age < keep)
+ break;
+
+ /* Check we are the first to claim this node */
+ if (!xchg(&node->age, 0))
+ break;
+
+ node->free = stale;
+ stale = node;
+ }
+ if (!list_is_last(pos, list))
+ __list_del_many(pos, list);
+
+ spin_unlock_irq(&pool->lock);
+ }
+
+ active |= !list_empty(list);
+ }
+
+ while ((node = stale)) {
+ stale = stale->free;
+ node_free(node);
+ }
+
+ return active;
+}
+
+static void pool_free_work(struct work_struct *wrk)
+{
+ struct intel_gt_buffer_pool *pool =
+ container_of(wrk, typeof(*pool), work.work);
+
+ if (pool_free_older_than(pool, HZ))
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+ struct intel_gt_buffer_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct intel_gt_buffer_pool *pool = node->pool;
+ struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+ unsigned long flags;
+
+ if (node->pinned) {
+ i915_gem_object_unpin_pages(node->obj);
+
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+ node->pinned = false;
+ }
+
+ GEM_BUG_ON(node->age);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_rcu(&node->link, list);
+ WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ schedule_delayed_work(&pool->work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
+{
+ assert_object_held(node->obj);
+
+ if (node->pinned)
+ return;
+
+ __i915_gem_object_pin_pages(node->obj);
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+ node->pinned = true;
+}
+
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz,
+ enum i915_map_type type)
+{
+ struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
+ struct intel_gt_buffer_pool_node *node;
+ struct drm_i915_gem_object *obj;
+
+ node = kmalloc(sizeof(*node),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->age = 0;
+ node->pool = pool;
+ node->pinned = false;
+ i915_active_init(&node->active, NULL, pool_retire, 0);
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj)) {
+ i915_active_fini(&node->active);
+ kfree(node);
+ return ERR_CAST(obj);
+ }
+
+ i915_gem_object_set_readonly(obj);
+
+ node->type = type;
+ node->obj = obj;
+ return node;
+}
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ struct intel_gt_buffer_pool_node *node;
+ struct list_head *list;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+ list = bucket_for_size(pool, size);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, list, link) {
+ unsigned long age;
+
+ if (node->obj->base.size < size)
+ continue;
+
+ if (node->type != type)
+ continue;
+
+ age = READ_ONCE(node->age);
+ if (!age)
+ continue;
+
+ if (cmpxchg(&node->age, age, 0) == age) {
+ spin_lock_irq(&pool->lock);
+ list_del_rcu(&node->link);
+ spin_unlock_irq(&pool->lock);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (&node->link == list) {
+ node = node_create(pool, size, type);
+ if (IS_ERR(node))
+ return node;
+ }
+
+ ret = i915_active_acquire(&node->active);
+ if (ret) {
+ node_free(node);
+ return ERR_PTR(ret);
+ }
+
+ return node;
+}
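
/*
 * Standalone sketch (illustrative only, C11 atomics, made-up names) of the
 * node-claiming convention above: age == 0 marks a node as claimed/active,
 * any other value is the jiffies timestamp at which it was returned to the
 * pool, and a single compare-and-swap decides which racing path owns it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long demo_age = 12345;	/* pretend timestamp */

static bool demo_claim(void)
{
	unsigned long age = atomic_load(&demo_age);

	/* already claimed (or never aged): nothing to take */
	if (!age)
		return false;

	/* only one caller can swing age from its old value to 0 */
	return atomic_compare_exchange_strong(&demo_age, &age, 0ul);
}

int main(void)
{
	printf("first claim:  %d\n", demo_claim());	/* 1 */
	printf("second claim: %d\n", demo_claim());	/* 0 */
	return 0;
}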
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ int n;
+
+ spin_lock_init(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+ INIT_DELAYED_WORK(&pool->work, pool_free_work);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+ do {
+ while (pool_free_older_than(pool, 0))
+ ;
+ } while (cancel_delayed_work_sync(&pool->work));
+}
+
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
+{
+ struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
new file mode 100644
index 000000000..487b8a552
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
+ enum i915_map_type type);
+
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+ struct i915_request *rq)
+{
+ /* did we call mark_used? */
+ GEM_WARN_ON(!node->pinned);
+
+ return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
new file mode 100644
index 000000000..df1d75d08
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "gem/i915_gem_object_types.h"
+#include "i915_active_types.h"
+
+struct intel_gt_buffer_pool {
+ spinlock_t lock;
+ struct list_head cache_list[4];
+ struct delayed_work work;
+};
+
+struct intel_gt_buffer_pool_node {
+ struct i915_active active;
+ struct drm_i915_gem_object *obj;
+ struct list_head link;
+ union {
+ struct intel_gt_buffer_pool *pool;
+ struct intel_gt_buffer_pool_node *free;
+ struct rcu_head rcu;
+ };
+ unsigned long age;
+ enum i915_map_type type;
+ u32 pinned;
+};
+
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
new file mode 100644
index 000000000..3f656d3db
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_regs.h"
+
+static u32 read_reference_ts_freq(struct intel_uncore *uncore)
+{
+ u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
+ u32 rpm_config_reg)
+{
+ u32 f19_2_mhz = 19200000;
+ u32 f24_mhz = 24000000;
+ u32 f25_mhz = 25000000;
+ u32 f38_4_mhz = 38400000;
+ u32 crystal_clock =
+ (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+ switch (crystal_clock) {
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ MISSING_CASE(crystal_clock);
+ return 0;
+ }
+}
+
+static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not reconfigure it, and we assume nobody else does.
+ *
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+}
+
+static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+}
+
+static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
+{
+ /*
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours)."
+ */
+ return 12500000;
+}
+
+static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
+{
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
+ */
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ if (GRAPHICS_VER(uncore->i915) >= 11)
+ return gen11_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 9)
+ return gen9_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 5)
+ return gen5_read_clock_frequency(uncore);
+ else
+ return gen2_read_clock_frequency(uncore);
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
+ gt->clock_frequency = read_clock_frequency(gt->uncore);
+
+ /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
+ if (GRAPHICS_VER(gt->i915) == 11)
+ gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+ else if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+
+ GT_TRACE(gt,
+ "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+ gt->clock_frequency / 1000,
+ gt->clock_period_ns,
+ div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
+ USEC_PER_SEC));
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt)
+{
+ if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
+ dev_err(gt->i915->drm.dev,
+ "GT clock frequency changed, was %uHz, now %uHz!\n",
+ gt->clock_frequency,
+ read_clock_frequency(gt->uncore));
+ }
+}
+#endif
+
+static u64 div_u64_roundup(u64 nom, u32 den)
+{
+ return div_u64(nom + den - 1, den);
+}
+
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
+{
+ return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
+}
+
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
+{
+ return intel_gt_clock_interval_to_ns(gt, 16 * count);
+}
+
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
+{
+ return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
+}
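
/*
 * Standalone sketch of the round-up conversions above (illustrative only,
 * assuming a 19.2 MHz clock): converting one tick to nanoseconds and back
 * rounds up at each step, so the result never undershoots the requested time.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t demo_div_round_up(uint64_t nom, uint32_t den)
{
	return (nom + den - 1) / den;
}

int main(void)
{
	const uint32_t clock_hz = 19200000;	/* assumed crystal clock */
	uint64_t ns, ticks;

	/* one clock interval -> ns: 10^9 / 19.2e6 = 52.08..., rounded up to 53 */
	ns = demo_div_round_up(1ull * 1000000000, clock_hz);
	/* back to clock intervals: 53 * 19.2e6 / 10^9 = 1.017..., rounded up to 2 */
	ticks = demo_div_round_up((uint64_t)clock_hz * ns, 1000000000);

	printf("1 tick ~= %" PRIu64 " ns ~= %" PRIu64 " tick(s)\n", ns, ticks);
	return 0;
}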
+
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
+{
+ u64 val;
+
+ /*
+ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
+ if (GRAPHICS_VER(gt->i915) == 6)
+ val = div_u64_roundup(val, 25) * 25;
+
+ return val;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
new file mode 100644
index 000000000..8b03e97a8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CLOCK_UTILS_H__
+#define __INTEL_GT_CLOCK_UTILS_H__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_gt_check_clock_frequency(const struct intel_gt *gt);
+#else
+static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
+#endif
+
+u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count);
+u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count);
+
+u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns);
+u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns);
+
+#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
new file mode 100644
index 000000000..dd53641f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_pm_debugfs.h"
+#include "intel_sseu_debugfs.h"
+#include "pxp/intel_pxp_debugfs.h"
+#include "uc/intel_uc_debugfs.h"
+
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
+{
+ int ret = intel_gt_terminally_wedged(gt);
+
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+{
+ /* Flush any previous reset before applying for a new one */
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
+ "Manually reset engine mask to %llx", val);
+}
+
+/*
+ * keep the interface clean where the first parameter
+ * is a 'struct intel_gt *' instead of 'void *'
+ */
+static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
+{
+ return intel_gt_debugfs_reset_show(data, val);
+}
+
+static int __intel_gt_debugfs_reset_store(void *data, u64 val)
+{
+ intel_gt_debugfs_reset_store(data, val);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
+ __intel_gt_debugfs_reset_store, "%llu\n");
+
+static int steering_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_gt *gt = m->private;
+
+ intel_gt_mcr_report_steering(&p, gt, true);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(steering);
+
+static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "reset", &reset_fops, NULL },
+ { "steering", &steering_fops },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
+
+void intel_gt_debugfs_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ gt_debugfs_register(gt, root);
+
+ intel_gt_engines_debugfs_register(gt, root);
+ intel_gt_pm_debugfs_register(gt, root);
+ intel_sseu_debugfs_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
+ intel_pxp_debugfs_register(&gt->pxp, root);
+}
+
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct intel_gt_debugfs_file *files,
+ unsigned long count, void *data)
+{
+ while (count--) {
+ umode_t mode = files->fops->write ? 0644 : 0444;
+
+ if (!files->eval || files->eval(data))
+ debugfs_create_file(files->name,
+ mode, root, data,
+ files->fops);
+
+ files++;
+ }
+}
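+
+/*
+ * Illustrative sketch of a caller (not part of the upstream sources); "foo",
+ * foo_fops and foo_eval are hypothetical names. The optional eval callback
+ * lets a file be created only when it makes sense for this GT:
+ *
+ *	static bool foo_eval(void *data)
+ *	{
+ *		struct intel_gt *gt = data;
+ *
+ *		return HAS_ENGINE(gt, VECS0);
+ *	}
+ *
+ *	static const struct intel_gt_debugfs_file files[] = {
+ *		{ "foo", &foo_fops, foo_eval },
+ *	};
+ *
+ *	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+ */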
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
new file mode 100644
index 000000000..e4110eebf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_DEBUGFS_H
+#define INTEL_GT_DEBUGFS_H
+
+#include <linux/file.h>
+
+struct intel_gt;
+
+#define __GT_DEBUGFS_ATTRIBUTE_FOPS(__name) \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(__name, __size_vf) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open_size(file, __name ## _show, inode->i_private, \
+ __size_vf(inode->i_private)); \
+} \
+__GT_DEBUGFS_ATTRIBUTE_FOPS(__name)
+
+void intel_gt_debugfs_register(struct intel_gt *gt);
+
+struct intel_gt_debugfs_file {
+ const char *name;
+ const struct file_operations *fops;
+ bool (*eval)(void *data);
+};
+
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct intel_gt_debugfs_file *files,
+ unsigned long count, void *data);
+
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+
+#endif /* INTEL_GT_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
new file mode 100644
index 000000000..8f9b874fd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h" /* for_each_engine! */
+#include "intel_engine.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
+
+static int engines_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct drm_printer p;
+
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, gt, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(engines);
+
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "engines", &engines_fops },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
new file mode 100644
index 000000000..dda113452
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_ENGINES_DEBUGFS_H
+#define INTEL_GT_ENGINES_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_GT_ENGINES_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
new file mode 100644
index 000000000..f26882fdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sched/clock.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_breadcrumbs.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_regs.h"
+#include "intel_uncore.h"
+#include "intel_rps.h"
+#include "pxp/intel_pxp_irq.h"
+
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
+{
+ if (iir & GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(guc);
+}
+
+static u32
+gen11_gt_engine_identity(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident;
+}
+
+static void
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+ const u16 iir)
+{
+ struct intel_gt *media_gt = gt->i915->media_gt;
+
+ if (instance == OTHER_GUC_INSTANCE)
+ return guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
+ return guc_irq_handler(&media_gt->uc.guc, iir);
+
+ if (instance == OTHER_GTPM_INSTANCE)
+ return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
+ return gen11_rps_irq_handler(&media_gt->rps, iir);
+
+ if (instance == OTHER_KCR_INSTANCE)
+ return intel_pxp_irq_handler(&gt->pxp, iir);
+
+ if (instance == OTHER_GSC_INSTANCE)
+ return intel_gsc_irq_handler(gt, iir);
+
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+ const u8 instance, const u16 iir)
+{
+ struct intel_engine_cs *engine;
+
+ /*
+ * Platforms with standalone media have their media engines in another
+ * GT.
+ */
+ if (MEDIA_VER(gt->i915) >= 13 &&
+ (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
+ if (!gt->i915->media_gt)
+ goto err;
+
+ gt = gt->i915->media_gt;
+ }
+
+ if (instance <= MAX_ENGINE_INSTANCE)
+ engine = gt->engine_class[class][instance];
+ else
+ engine = NULL;
+
+ if (likely(engine))
+ return intel_engine_cs_irq(engine, iir);
+
+err:
+ WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+ class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
+{
+ const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+ const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+ const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+ if (unlikely(!intr))
+ return;
+
+ if (class <= COPY_ENGINE_CLASS || class == COMPUTE_CLASS)
+ return gen11_engine_irq_handler(gt, class, instance, intr);
+
+ if (class == OTHER_CLASS)
+ return gen11_other_irq_handler(gt, instance, intr);
+
+ WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+ class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
+
+ gen11_gt_identity_handler(gt, ident);
+ }
+
+ /* The clear must come after the shared IIR has been serviced for the engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
+{
+ unsigned int bank;
+
+ spin_lock(gt->irq_lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (master_ctl & GEN11_GT_DW_IRQ(bank))
+ gen11_gt_bank_handler(gt, bank);
+ }
+
+ spin_unlock(gt->irq_lock);
+}
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 dw;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (dw & BIT(bit)) {
+ /*
+ * According to the BSpec, DW_IIR bits cannot be cleared without
+ * first servicing the Selector & Shared IIR registers.
+ */
+ gen11_gt_engine_identity(gt, bank, bit);
+
+ /*
+ * We locked GT INT DW by reading it. If we want to (try
+ * to) recover from this successfully, we need to clear
+ * our bit, otherwise we are locking the register for
+ * everybody.
+ */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+ return true;
+ }
+
+ return false;
+}
+
+void gen11_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* Disable RCS, BCS, VCS and VECS class engines. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
+ if (CCS_MASK(gt))
+ intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);
+
+ /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
+ intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
+ intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
+ intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
+ intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
+ if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
+ intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0);
+}
+
+void gen11_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 irqs = GT_RENDER_USER_INTERRUPT;
+ const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
+ u32 dmask;
+ u32 smask;
+
+ if (!intel_uc_wants_guc_submission(&gt->uc))
+ irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
+
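+	/*
+	 * Each enable/mask register below covers a pair of engines, one per
+	 * 16-bit half: dmask programs both halves, smask only the upper one.
+	 */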
+ dmask = irqs << 16 | irqs;
+ smask = irqs << 16;
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
+ if (CCS_MASK(gt))
+ intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE,
+ gsc_mask);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
+ intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
+ intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
+ intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
+ intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
+ intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
+ intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
+ intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
+ intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
+ if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
+ intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
+
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir);
+}
+
+static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (!HAS_L3_DPF(gt->i915))
+ return;
+
+ spin_lock(gt->irq_lock);
+ gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
+ spin_unlock(gt->irq_lock);
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ gt->i915->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ gt->i915->l3_parity.which_slice |= 1 << 0;
+
+ schedule_work(&gt->i915->l3_parity.error_work);
+}
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir >> 12);
+
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir >> 22);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+ if (gt_iir & GT_PARITY_ERROR(gt->i915))
+ gen7_parity_error_irq_handler(gt, gt_iir);
+}
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 iir;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ iir = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(iir)) {
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(0), iir);
+ }
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ iir = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(iir)) {
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(1), iir);
+ }
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ iir = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(iir)) {
+ intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(3), iir);
+ }
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ iir = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(iir)) {
+ gen6_rps_irq_handler(&gt->rps, iir);
+ guc_irq_handler(&gt->uc.guc, iir >> 16);
+ raw_reg_write(regs, GEN8_GT_IIR(2), iir);
+ }
+ }
+}
+
+void gen8_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+}
+
+void gen8_gt_irq_postinstall(struct intel_gt *gt)
+{
+ /* These are interrupts we'll toggle with the ring mask register */
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
+ const u32 gt_interrupts[] = {
+ irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+ irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
+ 0,
+ irqs << GEN8_VECS_IRQ_SHIFT,
+ };
+ struct intel_uncore *uncore = gt->uncore;
+
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled. Same will be the case for GuC interrupts.
+ */
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen5_gt_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ lockdep_assert_held(gt->irq_lock);
+
+ GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
+ gt->gt_imr &= ~interrupt_mask;
+ gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
+ intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
+}
+
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, mask);
+ intel_uncore_posting_read_fw(gt->uncore, GTIMR);
+}
+
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, 0);
+}
+
+void gen5_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
+}
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 pm_irqs = 0;
+ u32 gt_irqs = 0;
+
+ gt->gt_imr = ~0;
+ if (HAS_L3_DPF(gt->i915)) {
+ /* L3 parity interrupt is always unmasked. */
+ gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
+ gt_irqs |= GT_PARITY_ERROR(gt->i915);
+ }
+
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (GRAPHICS_VER(gt->i915) == 5)
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
+ else
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+
+ if (GRAPHICS_VER(gt->i915) >= 6) {
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
+ if (HAS_ENGINE(gt, VECS0)) {
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
+
+ gt->pm_imr = 0xffffffff;
+ GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
new file mode 100644
index 000000000..41cad3866
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_IRQ_H
+#define INTEL_GT_IRQ_H
+
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct intel_gt;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
+
+void gen11_gt_irq_reset(struct intel_gt *gt);
+void gen11_gt_irq_postinstall(struct intel_gt *gt);
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank,
+ const unsigned int bit);
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt);
+void gen5_gt_irq_reset(struct intel_gt *gt);
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
+void gen8_gt_irq_reset(struct intel_gt *gt);
+void gen8_gt_irq_postinstall(struct intel_gt *gt);
+
+static inline void intel_engine_cs_irq(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir)
+ engine->irq_handler(engine, iir);
+}
+
+static inline void
+intel_engine_set_irq_handler(struct intel_engine_cs *engine,
+ void (*fn)(struct intel_engine_cs *engine,
+ u16 iir))
+{
+ /*
+ * As the interrupts are live while we allocate and set up the
+ * engines, err on the side of caution and apply barriers when
+ * updating the irq handler callback. This ensures that once we
+ * start using the engine, we receive interrupts only for ourselves
+ * and do not lose any.
+ */
+ smp_store_mb(engine->irq_handler, fn);
+}
+
+#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
new file mode 100644
index 000000000..e79405a45
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_gt_mcr.h"
+#include "intel_gt_regs.h"
+
+/**
+ * DOC: GT Multicast/Replicated (MCR) Register Support
+ *
+ * Some GT registers are designed as "multicast" or "replicated" registers:
+ * multiple instances of the same register share a single MMIO offset. MCR
+ * registers are generally used when the hardware needs to potentially track
+ * independent values of a register per hardware unit (e.g., per-subslice,
+ * per-L3bank, etc.). The specific types of replication that exist vary
+ * per-platform.
+ *
+ * MMIO accesses to MCR registers are controlled according to the settings
+ * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
+ * registers can be done in either multicast form (i.e., a single write updates all
+ * instances of the register to the same value) or unicast form (a write updates
+ * only one specific instance). Reads of MCR registers always operate in a unicast
+ * manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
+ * Selection of a specific MCR instance for unicast operations is referred to
+ * as "steering."
+ *
+ * If MCR register operations are steered toward a hardware unit that is
+ * fused off or currently powered down due to power gating, the MMIO operation
+ * is "terminated" by the hardware. Terminated read operations will return a
+ * value of zero and terminated unicast write operations will be silently
+ * ignored.
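+ *
+ * Purely illustrative usage sketch; REG_FOO, group, instance and value are
+ * hypothetical placeholders rather than definitions from this driver.
+ * Updating every instance at once, writing a single steered instance and
+ * reading back from a known-good (non-terminated) instance look roughly
+ * like:
+ *
+ *	intel_gt_mcr_multicast_write(gt, REG_FOO, value);
+ *	intel_gt_mcr_unicast_write(gt, REG_FOO, value, group, instance);
+ *	value = intel_gt_mcr_read_any(gt, REG_FOO);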
+ */
+
+#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+
+static const char * const intel_steering_types[] = {
+ "L3BANK",
+ "MSLICE",
+ "LNCF",
+ "INSTANCE 0",
+};
+
+static const struct intel_mmio_range icl_l3bank_steering_table[] = {
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+ { 0x004000, 0x004AFF },
+ { 0x00C800, 0x00CFFF },
+ { 0x00DD00, 0x00DDFF },
+ { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
+ {},
+};
+
+static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D800, 0x00D8FF },
+ {},
+};
+
+static const struct intel_mmio_range dg2_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
+ {},
+};
+
+/*
+ * We have several types of MCR registers on PVC where steering to (0,0)
+ * will always provide us with a non-terminated value. We'll stick them
+ * all in the same table for simplicity.
+ */
+static const struct intel_mmio_range pvc_instance0_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* HALF-BSLICE */
+ { 0x008800, 0x00887F }, /* CC */
+ { 0x008A80, 0x008AFF }, /* TILEPSMI */
+ { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
+ { 0x00B100, 0x00B3FF }, /* L3BANK */
+ { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
+ { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
+ { 0x00DD00, 0x00DDFF }, /* BSLICE */
+ { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
+ { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
+ { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
+ { 0x024180, 0x0241FF }, /* HALF-BSLICE */
+ {},
+};
+
+void intel_gt_mcr_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /*
+ * An mslice is unavailable only if both the meml3 for the slice is
+ * disabled *and* all of the DSS in the slice (quadrant) are disabled.
+ */
+ if (HAS_MSLICE_STEERING(i915)) {
+ gt->info.mslice_mask =
+ intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
+ GEN_DSS_PER_MSLICE);
+ gt->info.mslice_mask |=
+ (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN12_MEML3_EN_MASK);
+
+ if (!gt->info.mslice_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "mslice mask all zero!\n");
+ }
+
+ if (IS_PONTEVECCHIO(i915)) {
+ gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
+ } else if (IS_DG2(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = dg2_lncf_steering_table;
+ } else if (IS_XEHPSDV(i915)) {
+ gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+ gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
+ } else if (GRAPHICS_VER(i915) >= 11 &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+ gt->steering_table[L3BANK] = icl_l3bank_steering_table;
+ gt->info.l3bank_mask =
+ ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+ if (!gt->info.l3bank_mask) /* should be impossible! */
+ drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
+ } else if (GRAPHICS_VER(i915) >= 11) {
+ /*
+ * We expect all modern platforms to have at least some
+ * type of steering that needs to be initialized.
+ */
+ MISSING_CASE(INTEL_INFO(i915)->platform);
+ }
+}
+
+/*
+ * rw_with_mcr_steering_fw - Access a register with specific MCR steering
+ * @uncore: pointer to struct intel_uncore
+ * @reg: register being accessed
+ * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
+ * @group: group number (documented as "sliceid" on older platforms)
+ * @instance: instance number (documented as "subsliceid" on older platforms)
+ * @value: register value to be written (ignored for read)
+ *
+ * Return: 0 for write access, or the register value for read access.
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance, u32 value)
+{
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
+
+ lockdep_assert_held(&uncore->lock);
+
+ if (GRAPHICS_VER(uncore->i915) >= 11) {
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
+
+ /*
+ * Wa_22013088509
+ *
+ * The setting of the multicast/unicast bit usually wouldn't
+ * matter for read operations (which always return the value
+ * from a single register instance regardless of how that bit
+ * is set), but some platforms have a workaround requiring us
+ * to remain in multicast mode for reads. There's no real
+ * downside to this, so we'll just go ahead and do so on all
+ * platforms; we'll only clear the multicast bit from the mask
+ * when explicitly doing a write operation.
+ */
+ if (rw_flag == FW_REG_WRITE)
+ mcr_mask |= GEN11_MCR_MULTICAST;
+ } else {
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
+ }
+
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ if (rw_flag == FW_REG_READ)
+ val = intel_uncore_read_fw(uncore, reg);
+ else
+ intel_uncore_write_fw(uncore, reg, value);
+
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
+
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+ return val;
+}
+
+static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int group, int instance,
+ u32 value)
+{
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
+ rw_flag);
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
+ GEN8_MCR_SELECTOR,
+ FW_REG_READ | FW_REG_WRITE);
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
+
+ return val;
+}
+
+/**
+ * intel_gt_mcr_read - read a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to read
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Returns the value read from an MCR register after steering toward a specific
+ * group/instance.
+ */
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance)
+{
+ return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
+}
+
+/**
+ * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Write an MCR register in unicast mode after steering toward a specific
+ * group/instance.
+ */
+void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_reg_t reg, u32 value,
+ int group, int instance)
+{
+ rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances.
+ */
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value)
+{
+ intel_uncore_write(gt->uncore, reg, value);
+}
+
+/**
+ * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances. This
+ * function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
+ * be obtained automatically.
+ */
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_reg_t reg, u32 value)
+{
+ intel_uncore_write_fw(gt->uncore, reg, value);
+}
+
+/*
+ * reg_needs_read_steering - determine whether a register read requires
+ * explicit steering
+ * @gt: GT structure
+ * @reg: the register to check steering requirements for
+ * @type: type of multicast steering to check
+ *
+ * Determines whether @reg needs explicit steering of a specific type for
+ * reads.
+ *
+ * Returns false if @reg does not belong to a register range of the given
+ * steering type, or if the default (subslice-based) steering IDs are suitable
+ * for @type steering too.
+ */
+static bool reg_needs_read_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ enum intel_steering_type type)
+{
+ const u32 offset = i915_mmio_reg_offset(reg);
+ const struct intel_mmio_range *entry;
+
+ if (likely(!gt->steering_table[type]))
+ return false;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++) {
+ if (offset >= entry->start && offset <= entry->end)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * get_nonterminated_steering - determines valid IDs for a class of MCR steering
+ * @gt: GT structure
+ * @type: multicast register type
+ * @group: Group ID returned
+ * @instance: Instance ID returned
+ *
+ * Determines group and instance values that will steer reads of the specified
+ * MCR class to a non-terminated instance.
+ */
+static void get_nonterminated_steering(struct intel_gt *gt,
+ enum intel_steering_type type,
+ u8 *group, u8 *instance)
+{
+ switch (type) {
+ case L3BANK:
+ *group = 0; /* unused */
+ *instance = __ffs(gt->info.l3bank_mask);
+ break;
+ case MSLICE:
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask);
+ *instance = 0; /* unused */
+ break;
+ case LNCF:
+ /*
+ * An LNCF is always present if its mslice is present, so we
+ * can safely just steer to LNCF 0 in all cases.
+ */
+ GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
+ *group = __ffs(gt->info.mslice_mask) << 1;
+ *instance = 0; /* unused */
+ break;
+ case INSTANCE0:
+ /*
+ * There are a lot of MCR types for which instance (0, 0)
+ * will always provide a non-terminated value.
+ */
+ *group = 0;
+ *instance = 0;
+ break;
+ default:
+ MISSING_CASE(type);
+ *group = 0;
+ *instance = 0;
+ }
+}
+
+/**
+ * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
+ * will steer a register to a non-terminated instance
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @group: return variable for group steering
+ * @instance: return variable for instance steering
+ *
+ * This function returns a group/instance pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ */
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance)
+{
+ int type;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, group, instance);
+ return;
+ }
+ }
+
+ *group = gt->default_steering.groupid;
+ *instance = gt->default_steering.instanceid;
+}
+
+/**
+ * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ * This function assumes the caller is already holding any necessary forcewake
+ * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
+ * obtained automatically.
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering_fw(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read_fw(gt->uncore, reg);
+}
+
+/**
+ * intel_gt_mcr_read_any - reads one instance of an MCR register
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg)
+{
+ int type;
+ u8 group, instance;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (reg_needs_read_steering(gt, reg, type)) {
+ get_nonterminated_steering(gt, type, &group, &instance);
+ return rw_with_mcr_steering(gt->uncore, reg,
+ FW_REG_READ,
+ group, instance, 0);
+ }
+ }
+
+ return intel_uncore_read(gt->uncore, reg);
+}
+
+static void report_steering_type(struct drm_printer *p,
+ struct intel_gt *gt,
+ enum intel_steering_type type,
+ bool dump_table)
+{
+ const struct intel_mmio_range *entry;
+ u8 group, instance;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
+
+ if (!gt->steering_table[type]) {
+ drm_printf(p, "%s steering: uses default steering\n",
+ intel_steering_types[type]);
+ return;
+ }
+
+ get_nonterminated_steering(gt, type, &group, &instance);
+ drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
+ intel_steering_types[type], group, instance);
+
+ if (!dump_table)
+ return;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
+}
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table)
+{
+ drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
+ gt->default_steering.groupid,
+ gt->default_steering.instanceid);
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ report_steering_type(p, gt, INSTANCE0, dump_table);
+ } else if (HAS_MSLICE_STEERING(gt->i915)) {
+ report_steering_type(p, gt, MSLICE, dump_table);
+ report_steering_type(p, gt, LNCF, dump_table);
+ }
+}
+
+/**
+ * intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
+ * @gt: GT structure
+ * @dss: DSS ID to obtain steering for
+ * @group: pointer to storage for steering group ID
+ * @instance: pointer to storage for steering instance ID
+ *
+ * Returns the steering IDs (via the @group and @instance parameters) that
+ * correspond to a specific subslice/DSS ID.
+ */
+void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
+ unsigned int *group, unsigned int *instance)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ *group = dss / GEN_DSS_PER_CSLICE;
+ *instance = dss % GEN_DSS_PER_CSLICE;
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+ *group = dss / GEN_DSS_PER_GSLICE;
+ *instance = dss % GEN_DSS_PER_GSLICE;
+ } else {
+ *group = dss / GEN_MAX_SS_PER_HSW_SLICE;
+ *instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
+ return;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
new file mode 100644
index 000000000..77a8b11c2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_MCR__
+#define __INTEL_GT_MCR__
+
+#include "intel_gt_types.h"
+
+void intel_gt_mcr_init(struct intel_gt *gt);
+
+u32 intel_gt_mcr_read(struct intel_gt *gt,
+ i915_reg_t reg,
+ int group, int instance);
+u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_reg_t reg);
+u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_reg_t reg);
+
+void intel_gt_mcr_unicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value,
+ int group, int instance);
+void intel_gt_mcr_multicast_write(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
+ i915_reg_t reg, u32 value);
+
+void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
+ i915_reg_t reg,
+ u8 *group, u8 *instance);
+
+void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table);
+
+void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
+ unsigned int *group, unsigned int *instance);
+
+/*
+ * Helper for for_each_ss_steering loop. On pre-Xe_HP platforms, subslice
+ * presence is determined by using the group/instance as direct lookups in the
+ * slice/subslice topology. On Xe_HP and beyond, the steering is unrelated to
+ * the topology, so we look up the DSS ID directly in "slice 0".
+ */
+#define _HAS_SS(ss_, gt_, group_, instance_) ( \
+ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \
+ intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
+ intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
+
+/*
+ * Loop over each subslice/DSS and determine the group and instance IDs that
+ * should be used to steer MCR accesses toward this DSS.
+ */
+#define for_each_ss_steering(ss_, gt_, group_, instance_) \
+ for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
+ ss_ < I915_MAX_SS_FUSE_BITS; \
+ ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
+ for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
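+
+/*
+ * Purely illustrative sketch of the iterator above; REG_FOO is a
+ * hypothetical per-DSS MCR register, not a definition from this driver:
+ *
+ *	unsigned int dss, group, instance;
+ *
+ *	for_each_ss_steering(dss, gt, group, instance) {
+ *		u32 val = intel_gt_mcr_read(gt, REG_FOO, group, instance);
+ *
+ *		drm_dbg(&gt->i915->drm, "DSS%u: 0x%08x\n", dss, val);
+ *	}
+ */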
+
+#endif /* __INTEL_GT_MCR__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
new file mode 100644
index 000000000..f553e2173
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+#include <linux/suspend.h>
+
+#include "i915_drv.h"
+#include "i915_params.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_llc.h"
+#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_wakeref.h"
+#include "pxp/intel_pxp_pm.h"
+
+#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
+
+static void user_forcewake(struct intel_gt *gt, bool suspend)
+{
+ int count = atomic_read(&gt->user_wakeref);
+
+ /* Inside suspend/resume so single threaded, no races to worry about. */
+ if (likely(!count))
+ return;
+
+ intel_gt_pm_get(gt);
+ if (suspend) {
+ GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+ atomic_sub(count, &gt->wakeref.count);
+ } else {
+ atomic_add(count, &gt->wakeref.count);
+ }
+ intel_gt_pm_put(gt);
+}
+
+static void runtime_begin(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.start = ktime_get();
+ gt->stats.active = true;
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static void runtime_end(struct intel_gt *gt)
+{
+ local_irq_disable();
+ write_seqcount_begin(&gt->stats.lock);
+ gt->stats.active = false;
+ gt->stats.total =
+ ktime_add(gt->stats.total,
+ ktime_sub(ktime_get(), gt->stats.start));
+ write_seqcount_end(&gt->stats.lock);
+ local_irq_enable();
+}
+
+static int __gt_unpark(struct intel_wakeref *wf)
+{
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ struct drm_i915_private *i915 = gt->i915;
+
+ GT_TRACE(gt, "\n");
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has negative impact on the performance of the chip with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!gt->awake);
+
+ intel_rc6_unpark(&gt->rc6);
+ intel_rps_unpark(&gt->rps);
+ i915_pmu_gt_unparked(i915);
+ intel_guc_busyness_unpark(gt);
+
+ intel_gt_unpark_requests(gt);
+ runtime_begin(gt);
+
+ return 0;
+}
+
+static int __gt_park(struct intel_wakeref *wf)
+{
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ struct drm_i915_private *i915 = gt->i915;
+
+ GT_TRACE(gt, "\n");
+
+ runtime_end(gt);
+ intel_gt_park_requests(gt);
+
+ intel_guc_busyness_park(gt);
+ i915_vma_parked(gt);
+ i915_pmu_gt_parked(i915);
+ intel_rps_park(&gt->rps);
+ intel_rc6_park(&gt->rc6);
+
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
+
+ /* Defer dropping the display power well for 100ms, it's slow! */
+ GEM_BUG_ON(!wakeref);
+ intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+
+ return 0;
+}
+
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __gt_unpark,
+ .put = __gt_park,
+};
+
+void intel_gt_pm_init_early(struct intel_gt *gt)
+{
+ /*
+ * We access the runtime_pm structure via gt->i915 here rather than
+ * gt->uncore as we do elsewhere in the file because gt->uncore is not
+ * yet initialized for all tiles at this point in the driver startup.
+ * runtime_pm is per-device rather than per-tile, so this is still the
+ * correct structure.
+ */
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
+}
+
+void intel_gt_pm_init(struct intel_gt *gt)
+{
+ /*
+ * Enabling power-management should be "self-healing". If we cannot
+ * enable a feature, simply leave it disabled with a notice to the
+ * user.
+ */
+ intel_rc6_init(&gt->rc6);
+ intel_rps_init(&gt->rps);
+}
+
+static bool reset_engines(struct intel_gt *gt)
+{
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return __intel_gt_reset(gt, ALL_ENGINES) == 0;
+}
+
+static void gt_sanitize(struct intel_gt *gt, bool force)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+
+ GT_TRACE(gt, "force:%s", str_yes_no(force));
+
+ /* Use a raw wakeref to avoid calling intel_display_power_get early */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ intel_gt_check_clock_frequency(gt);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in and so worth trying to use the device once more.
+ */
+ if (intel_gt_is_wedged(gt))
+ intel_gt_unset_wedged(gt);
+
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
+ for_each_engine(engine, gt, id) {
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
+
+ if (engine->sanitize)
+ engine->sanitize(engine);
+ }
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, false);
+ }
+
+ intel_uc_reset(&gt->uc, false);
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
+
+ intel_rps_sanitize(&gt->rps);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_gt_pm_fini(struct intel_gt *gt)
+{
+ intel_rc6_fini(&gt->rc6);
+}
+
+int intel_gt_resume(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ err = intel_gt_has_unrecoverable_error(gt);
+ if (err)
+ return err;
+
+ GT_TRACE(gt, "\n");
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ gt_sanitize(gt, true);
+
+ intel_gt_pm_get(gt);
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_rc6_sanitize(&gt->rc6);
+ if (intel_gt_is_wedged(gt)) {
+ err = -EIO;
+ goto out_fw;
+ }
+
+ /* Only when the HW is re-initialised, can we replay the requests */
+ err = intel_gt_init_hw(gt);
+ if (err) {
+ i915_probe_error(gt->i915,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ goto err_wedged;
+ }
+
+ intel_uc_reset_finish(&gt->uc);
+
+ intel_rps_enable(&gt->rps);
+ intel_llc_enable(&gt->llc);
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+
+ engine->serial++; /* kernel context lost */
+ err = intel_engine_resume(engine);
+
+ intel_engine_pm_put(engine);
+ if (err) {
+ drm_err(&gt->i915->drm,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ goto err_wedged;
+ }
+ }
+
+ intel_rc6_enable(&gt->rc6);
+
+ intel_uc_resume(&gt->uc);
+
+ intel_pxp_resume(&gt->pxp);
+
+ user_forcewake(gt, false);
+
+out_fw:
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_pm_put(gt);
+ return err;
+
+err_wedged:
+ intel_gt_set_wedged(gt);
+ goto out_fw;
+}
+
+static void wait_for_suspend(struct intel_gt *gt)
+{
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ intel_gt_retire_requests(gt);
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+}
+
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+ user_forcewake(gt, true);
+ wait_for_suspend(gt);
+
+ intel_pxp_suspend_prepare(&gt->pxp);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
+ return pm_suspend_target_state;
+#else
+ return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ /* We expect to be idle already; but also want to be independent */
+ wait_for_suspend(gt);
+
+ if (is_mock_gt(gt))
+ return;
+
+ GEM_BUG_ON(gt->awake);
+
+ intel_uc_suspend(&gt->uc);
+ intel_pxp_suspend(&gt->pxp);
+
+ /*
+ * On disabling the device, we want to turn off HW access to memory
+ * that we no longer own.
+ *
+ * However, not all suspend-states disable the device. S0 (s2idle)
+ * is effectively runtime-suspend, the device is left powered on
+ * but needs to be put into a low power state. We need to keep
+ * power management enabled, but we also retain system state and so
+ * it remains safe to keep on using our allocated memory.
+ */
+ if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+ return;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ intel_rps_disable(&gt->rps);
+ intel_rc6_disable(&gt->rc6);
+ intel_llc_disable(&gt->llc);
+ }
+
+ gt_sanitize(gt, false);
+
+ GT_TRACE(gt, "\n");
+}
+
+void intel_gt_runtime_suspend(struct intel_gt *gt)
+{
+ intel_pxp_runtime_suspend(&gt->pxp);
+ intel_uc_runtime_suspend(&gt->uc);
+
+ GT_TRACE(gt, "\n");
+}
+
+int intel_gt_runtime_resume(struct intel_gt *gt)
+{
+ int ret;
+
+ GT_TRACE(gt, "\n");
+ intel_gt_init_swizzling(gt);
+ intel_ggtt_restore_fences(gt->ggtt);
+
+ ret = intel_uc_runtime_resume(&gt->uc);
+ if (ret)
+ return ret;
+
+ intel_pxp_runtime_resume(&gt->pxp);
+
+ return 0;
+}
+
+static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ ktime_t total = gt->stats.total;
+
+ if (gt->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), gt->stats.start));
+
+ return total;
+}
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
+{
+ unsigned int seq;
+ ktime_t total;
+
+ do {
+ seq = read_seqcount_begin(&gt->stats.lock);
+ total = __intel_gt_get_awake_time(gt);
+ } while (read_seqcount_retry(&gt->stats.lock, seq));
+
+ return total;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_gt_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
new file mode 100644
index 000000000..6c9a46452
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_H
+#define INTEL_GT_PM_H
+
+#include <linux/types.h>
+
+#include "intel_gt_types.h"
+#include "intel_wakeref.h"
+
+static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
+{
+ return intel_wakeref_is_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_wakeref_get(&gt->wakeref);
+}
+
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+ __intel_wakeref_get(&gt->wakeref);
+}
+
+static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+{
+ return intel_wakeref_get_if_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+ intel_wakeref_might_get(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put(struct intel_gt *gt)
+{
+ intel_wakeref_put(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+{
+ intel_wakeref_put_async(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+ intel_wakeref_might_put(&gt->wakeref);
+}
+
+#define with_intel_gt_pm(gt, tmp) \
+ for (tmp = 1, intel_gt_pm_get(gt); tmp; \
+ intel_gt_pm_put(gt), tmp = 0)
+
+/**
+ * with_intel_gt_pm_if_awake - if the GT is awake, get a wakeref to prevent
+ * it from sleeping, run some code and then asynchronously put the reference
+ * away.
+ *
+ * @gt: pointer to the gt
+ * @wf: temporary wakeref obtained for the duration of the block.
+ */
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
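+
+/*
+ * Purely illustrative sketch of the helper above; process_foo() stands in
+ * for work that should only run if the GT is already awake:
+ *
+ *	intel_wakeref_t wf;
+ *
+ *	with_intel_gt_pm_if_awake(gt, wf)
+ *		process_foo(gt);
+ */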
+
+static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
+{
+ return intel_wakeref_wait_for_idle(&gt->wakeref);
+}
+
+void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_init(struct intel_gt *gt);
+void intel_gt_pm_fini(struct intel_gt *gt);
+
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
+int intel_gt_resume(struct intel_gt *gt);
+
+void intel_gt_runtime_suspend(struct intel_gt *gt);
+int intel_gt_runtime_resume(struct intel_gt *gt);
+
+ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+
+static inline bool is_mock_gt(const struct intel_gt *gt)
+{
+ return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+}
+
+#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
new file mode 100644
index 000000000..108b9e76c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/seq_file.h>
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_pm_debugfs.h"
+#include "intel_gt_regs.h"
+#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_runtime_pm.h"
+#include "intel_uncore.h"
+#include "vlv_sideband.h"
+
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+{
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
+}
+
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+{
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_put(gt->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
+}
+
+static int forcewake_user_open(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ intel_gt_pm_debugfs_forcewake_user_open(gt);
+
+ return 0;
+}
+
+static int forcewake_user_release(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ intel_gt_pm_debugfs_forcewake_user_release(gt);
+
+ return 0;
+}
+
+static const struct file_operations forcewake_user_fops = {
+ .owner = THIS_MODULE,
+ .open = forcewake_user_open,
+ .release = forcewake_user_release,
+};
+
+static int fw_domains_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
+
+ seq_printf(m, "user.bypass_count = %u\n",
+ uncore->user_forcewake_count);
+
+ for_each_fw_domain(fw_domain, uncore, tmp)
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
+ READ_ONCE(fw_domain->wake_count));
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+
+static void print_rc6_res(struct seq_file *m,
+ const char *title,
+ const i915_reg_t reg)
+{
+ struct intel_gt *gt = m->private;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(gt->uncore, reg),
+ intel_rc6_residency_us(&gt->rc6, reg));
+}
+
+static int vlv_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rcctl1, pw_status, mt_fwake_req;
+
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+ pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+
+ seq_printf(m, "RC6 Enabled: %s\n",
+ str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
+ seq_printf(m, "Render Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
+
+ return fw_domains_show(m, NULL);
+}
+
+static int gen6_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
+ u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+
+ mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
+ gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
+
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ if (GRAPHICS_VER(i915) >= 9) {
+ gen9_powergate_enable =
+ intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ gen9_powergate_status =
+ intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
+ }
+
+ if (GRAPHICS_VER(i915) <= 7)
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+
+ seq_printf(m, "RC1e Enabled: %s\n",
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ if (GRAPHICS_VER(i915) >= 9) {
+ seq_printf(m, "Render Well Gating Enabled: %s\n",
+ str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ seq_printf(m, "Media Well Gating Enabled: %s\n",
+ str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ }
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_puts(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_puts(m, "Core Power Down\n");
+ else
+ seq_puts(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_puts(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_puts(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_puts(m, "RC7\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
+ if (GRAPHICS_VER(i915) >= 9) {
+ seq_printf(m, "Render Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ }
+
+ /* Not exactly sure what this is */
+ print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+ GEN6_GT_GFX_RC6_LOCKED);
+ print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+ print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
+
+ if (GRAPHICS_VER(i915) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
+ return fw_domains_show(m, NULL);
+}
+
+static int ilk_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
+
+ seq_printf(m, "HD boost: %s\n",
+ str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
+ seq_printf(m, "SW control enabled: %s\n",
+ str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
+ seq_printf(m, "Gated voltage change: %s\n",
+ str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
+ seq_puts(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_puts(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_puts(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_puts(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_puts(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_puts(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_puts(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int drpc_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_drpc(m);
+ else if (GRAPHICS_VER(i915) >= 6)
+ err = gen6_drpc(m);
+ else
+ err = ilk_drpc(m);
+ }
+
+ return err;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
+
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ if (GRAPHICS_VER(i915) == 5) {
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
+
+ drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
+ drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ drm_printf(p, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ u32 rpmodectl, freq_sts;
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ drm_printf(p, "Video Turbo Mode: %s\n",
+ str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ drm_printf(p, "HW control enabled: %s\n",
+ str_yes_no(rpmodectl & GEN6_RP_ENABLE));
+ drm_printf(p, "SW control enabled: %s\n",
+ str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
+
+ vlv_punit_get(i915);
+ freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+
+ drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);
+
+ drm_printf(p, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
+
+ drm_printf(p, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+
+ drm_printf(p, "max GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ drm_printf(p, "min GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+
+ drm_printf(p, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+
+ drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ struct intel_rps_freq_caps caps;
+ u32 rpmodectl, rpinclimit, rpdeclimit;
+ u32 rpstat, cagf, reqf;
+ u32 rpcurupei, rpcurup, rpprevup;
+ u32 rpcurdownei, rpcurdown, rpprevdown;
+ u32 rpupei, rpupt, rpdownei, rpdownt;
+ u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+
+ rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
+ gen6_rps_get_freq_caps(rps, &caps);
+ if (IS_GEN9_LP(i915))
+ gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
+ else
+ gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
+
+ /* RPSTAT1 is in the GT power well */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ if (GRAPHICS_VER(i915) >= 9) {
+ reqf >>= 23;
+ } else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
+ reqf = intel_gpu_freq(rps, reqf);
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+
+ rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ cagf = intel_rps_read_actual_frequency(rps);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (GRAPHICS_VER(i915) >= 11) {
+ pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent to the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
+ } else {
+ pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
+ }
+ pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
+
+ drm_printf(p, "Video Turbo Mode: %s\n",
+ str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ drm_printf(p, "HW control enabled: %s\n",
+ str_yes_no(rpmodectl & GEN6_RP_ENABLE));
+ drm_printf(p, "SW control enabled: %s\n",
+ str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
+
+ drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (GRAPHICS_VER(i915) <= 10)
+ drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
+ drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
+ rps->pm_intrmsk_mbz);
+ drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ drm_printf(p, "Render p-state ratio: %d\n",
+ (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ drm_printf(p, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ drm_printf(p, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
+ drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
+ drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
+ drm_printf(p, "CAGF: %dMHz\n", cagf);
+ drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
+ rpcurupei,
+ intel_gt_pm_interval_to_ns(gt, rpcurupei));
+ drm_printf(p, "RP CUR UP: %d (%lldns)\n",
+ rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
+ drm_printf(p, "RP PREV UP: %d (%lldns)\n",
+ rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
+ drm_printf(p, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
+ drm_printf(p, "RP UP EI: %d (%lldns)\n",
+ rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
+ drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
+ rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
+
+ drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
+ rpcurdownei,
+ intel_gt_pm_interval_to_ns(gt, rpcurdownei));
+ drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
+ rpcurdown,
+ intel_gt_pm_interval_to_ns(gt, rpcurdown));
+ drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
+ rpprevdown,
+ intel_gt_pm_interval_to_ns(gt, rpprevdown));
+ drm_printf(p, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
+ drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
+ rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
+ drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
+ rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
+
+ drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
+ intel_gpu_freq(rps, caps.min_freq));
+ drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
+ intel_gpu_freq(rps, caps.rp1_freq));
+ drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ intel_gpu_freq(rps, caps.rp0_freq));
+ drm_printf(p, "Max overclocked frequency: %dMHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ drm_printf(p, "Current freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+ drm_printf(p, "Actual freq: %d MHz\n", cagf);
+ drm_printf(p, "Idle freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+ drm_printf(p, "Min freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+ drm_printf(p, "Boost freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->boost_freq));
+ drm_printf(p, "Max freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+ drm_printf(p,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else {
+ drm_puts(p, "no P-state info available\n");
+ }
+
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
+ drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+}
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_gt_pm_frequency_dump(gt, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
+
+static int llc_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ const bool edram = GRAPHICS_VER(i915) > 8;
+ struct intel_rps *rps = &gt->rps;
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq, ia_freq;
+
+ seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ i915->edram_size_mb);
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ ia_freq = gpu_freq;
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(i915) ||
+ GRAPHICS_VER(i915) >= 11 ?
+ GEN9_FREQ_SCALER : 1))),
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return 0;
+}
+
+static bool llc_eval(void *data)
+{
+ struct intel_gt *gt = data;
+
+ return HAS_LLC(gt->i915);
+}
+
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
+
+static const char *rps_power_to_str(unsigned int power)
+{
+ static const char * const strings[] = {
+ [LOW_POWER] = "low power",
+ [BETWEEN] = "mixed",
+ [HIGH_POWER] = "high power",
+ };
+
+ if (power >= ARRAY_SIZE(strings) || !strings[power])
+ return "unknown";
+
+ return strings[power];
+}
+
+static int rps_boost_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_rps *rps = &gt->rps;
+
+ seq_printf(m, "RPS enabled? %s\n",
+ str_yes_no(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n",
+ str_yes_no(intel_rps_is_active(rps)));
+ seq_printf(m, "GPU busy? %s, %llums\n",
+ str_yes_no(gt->awake),
+ ktime_to_ms(intel_gt_get_awake_time(gt)));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_rps_read_actual_frequency(rps));
+ seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
+
+ seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
+
+ if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rpup, rpupei;
+ u32 rpdown, rpdownei;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+ rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+ rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+ rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+ rps_power_to_str(rps->power.mode));
+ seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
+ rpup && rpupei ? 100 * rpup / rpupei : 0,
+ rps->power.up_threshold);
+ seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
+ rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
+ rps->power.down_threshold);
+ } else {
+ seq_puts(m, "\nRPS Autotuning inactive\n");
+ }
+
+ return 0;
+}
+
+static bool rps_eval(void *data)
+{
+ struct intel_gt *gt = data;
+
+ return HAS_RPS(gt->i915);
+}
+
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "drpc", &drpc_fops, NULL },
+ { "frequency", &frequency_fops, NULL },
+ { "forcewake", &fw_domains_fops, NULL },
+ { "forcewake_user", &forcewake_user_fops, NULL},
+ { "llc", &llc_fops, llc_eval },
+ { "rps_boost", &rps_boost_fops, rps_eval },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
new file mode 100644
index 000000000..0ace8c2da
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_DEBUGFS_H
+#define INTEL_GT_PM_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct drm_printer;
+
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m);
+
+/* functions that need to be accessed by the upper level non-gt interfaces */
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+
+#endif /* INTEL_GT_PM_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
new file mode 100644
index 000000000..52f2a28b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_gt_regs.h"
+
+static void write_pm_imr(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_imr;
+ i915_reg_t reg;
+
+ if (GRAPHICS_VER(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ mask <<= 16; /* pm is in upper half */
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+static void gen6_gt_pm_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ u32 new_val;
+
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+ lockdep_assert_held(gt->irq_lock);
+
+ new_val = gt->pm_imr;
+ new_val &= ~interrupt_mask;
+ new_val |= ~enabled_irq_mask & interrupt_mask;
+
+ if (new_val != gt->pm_imr) {
+ gt->pm_imr = new_val;
+ write_pm_imr(gt);
+ }
+}
+
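The mask update above is easier to follow with concrete numbers; a standalone sketch of the same arithmetic with made-up values (not code from this patch):

#include <linux/types.h>

static u32 pm_imr_example(void)
{
	u32 pm_imr = 0xf0;		/* current gt->pm_imr shadow */
	u32 interrupt_mask = 0x0f;	/* bits being (re)programmed */
	u32 enabled_irq_mask = 0x03;	/* of those, the bits to unmask */

	pm_imr &= ~interrupt_mask;			/* 0xf0: untouched bits keep their state */
	pm_imr |= ~enabled_irq_mask & interrupt_mask;	/* 0xfc: bits 2-3 masked, 0-1 unmasked */

	return pm_imr;
}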
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, mask);
+}
+
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, 0);
+}
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_posting_read(uncore, reg);
+}
+
+static void write_pm_ier(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_ier;
+ i915_reg_t reg;
+
+ if (GRAPHICS_VER(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ mask <<= 16; /* pm is in upper half */
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
+{
+ lockdep_assert_held(gt->irq_lock);
+
+ gt->pm_ier |= enable_mask;
+ write_pm_ier(gt);
+ gen6_gt_pm_unmask_irq(gt, enable_mask);
+}
+
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
+{
+ lockdep_assert_held(gt->irq_lock);
+
+ gt->pm_ier &= ~disable_mask;
+ gen6_gt_pm_mask_irq(gt, disable_mask);
+ write_pm_ier(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
new file mode 100644
index 000000000..ff766966d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_IRQ_H
+#define INTEL_GT_PM_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask);
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask);
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask);
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask);
+
+#endif /* INTEL_GT_PM_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
new file mode 100644
index 000000000..dd006563c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -0,0 +1,1591 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_REGS__
+#define __INTEL_GT_REGS__
+
+#include "i915_reg_defs.h"
+
+/* RPM unit config (Gen8+) */
+#define RPM_CONFIG0 _MMIO(0xd00)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
+#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
+
+#define RPM_CONFIG1 _MMIO(0xd04)
+#define GEN10_GT_NOA_ENABLE (1 << 9)
+
+/* RCP unit config (Gen8+) */
+#define RCP_CONFIG _MMIO(0xd08)
+
+#define RC6_LOCATION _MMIO(0xd40)
+#define RC6_CTX_IN_DRAM (1 << 0)
+#define RC6_CTX_BASE _MMIO(0xd48)
+#define RC6_CTX_BASE_MASK 0xFFFFFFF0
+
+#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0xd50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0xd70 + (n) * 4)
+#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0xd84)
+#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0xd88)
+
+#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
+#define SF_MCR_SELECTOR _MMIO(0xfd8)
+#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
+#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26)
+#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
+#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
+#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_MULTICAST REG_BIT(31)
+#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
+#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
+#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
+#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
+
+#define IPEIR_I965 _MMIO(0x2064)
+#define IPEHR_I965 _MMIO(0x2068)
+
+/*
+ * On GEN4, only the render ring INSTDONE exists and has a different
+ * layout than the GEN7+ version.
+ * The GEN2 counterpart of this register is GEN2_INSTDONE.
+ */
+#define INSTPS _MMIO(0x2070) /* 965+ only */
+#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
+#define ACTHD_I965 _MMIO(0x2074)
+#define HWS_PGA _MMIO(0x2080)
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
+
+#define _3D_CHICKEN _MMIO(0x2084)
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+
+#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
+#define PWRCTX_EN (1 << 0)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN2 _MMIO(0x208c)
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+#define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+
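A sketch of the masked-write convention described in the comment above: the value bit is mirrored into the enable half (bits 31:16) so only that bit is affected by the write. It assumes the _MASKED_BIT_ENABLE() helper from i915_reg_defs.h and an uncore pointer in scope; it is not code from this patch.

	intel_uncore_write(uncore, _3D_CHICKEN2,
			   _MASKED_BIT_ENABLE(_3D_CHICKEN2_WM_READ_PIPELINED));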
+#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */
+#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
+
+#define GEN2_INSTDONE _MMIO(0x2090)
+#define NOPID _MMIO(0x2094)
+#define HWSTAM _MMIO(0x2098)
+
+#define WAIT_FOR_RC6_EXIT _MMIO(0x20cc)
+/* HSW only */
+#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
+#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SELECTIVE_READ_ADDRESSING_SHIFT)
+#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
+/* HSW+ */
+#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
+#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
+#define HSW_RCS_INHIBIT (1 << 8)
+/* Gen8 */
+#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
+#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
+
+#define GEN6_GT_MODE _MMIO(0x20d0)
+#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
+#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
+#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
+#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
+#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20d4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
+
+#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14)
+
+#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
+#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
+#define GEN12_PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
+
+#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
+#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20ec)
+#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0)
+
+/* WaClearTdlStateAckDirtyBits */
+#define GEN8_STATE_ACK _MMIO(0x20f0)
+#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20f8)
+#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
+#define GEN9_STATE_ACK_TDL0 (1 << 12)
+#define GEN9_STATE_ACK_TDL1 (1 << 13)
+#define GEN9_STATE_ACK_TDL2 (1 << 14)
+#define GEN9_STATE_ACK_TDL3 (1 << 15)
+#define GEN9_SUBSLICE_TDL_ACK_BITS \
+ (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
+ GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
+
+#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define CM0_IZ_OPT_DISABLE (1 << 6)
+#define CM0_ZR_OPT_DISABLE (1 << 5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define CM0_RC_OP_FLUSH_DISABLE (1 << 0)
+
+#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */
+
+/*
+ * Logical Context regs
+ */
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ * SNB BSpec has comments indicating that we should use the
+ * render context size instead if execlists are disabled, but
+ * based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ * doesn't need saving on GT1
+ */
+#define CXT_SIZE _MMIO(0x21a0)
+#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE _MMIO(0x21a8)
+#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
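Tying the context-size notes above to the field extractors: a sketch of how a SNB/IVB path might size the default context image. The 64-byte scaling is an assumption based on how the driver consumes these fields elsewhere; the snippet is illustrative only.

	u32 cxt_size = intel_uncore_read(uncore, CXT_SIZE);
	/* Sum of the ring, extended and pipelined state fields, in 64-byte units. */
	u32 context_bytes = GEN6_CXT_TOTAL_SIZE(cxt_size) * 64;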
+#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214)
+
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
+#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
+#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
+
+#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
+#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
+
+#define HS_INVOCATION_COUNT _MMIO(0x2300)
+#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4)
+#define DS_INVOCATION_COUNT _MMIO(0x2308)
+#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4)
+#define IA_VERTICES_COUNT _MMIO(0x2310)
+#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4)
+#define IA_PRIMITIVES_COUNT _MMIO(0x2318)
+#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4)
+#define VS_INVOCATION_COUNT _MMIO(0x2320)
+#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4)
+#define GS_INVOCATION_COUNT _MMIO(0x2328)
+#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4)
+#define GS_PRIMITIVES_COUNT _MMIO(0x2330)
+#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4)
+#define CL_INVOCATION_COUNT _MMIO(0x2338)
+#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4)
+#define CL_PRIMITIVES_COUNT _MMIO(0x2340)
+#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4)
+#define PS_INVOCATION_COUNT _MMIO(0x2348)
+#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4)
+#define PS_DEPTH_COUNT _MMIO(0x2350)
+#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4)
+#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420)
+#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430)
+#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434)
+#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438)
+#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243c)
+#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440)
+#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500)
+#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
+#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
+
+#define GFX_MODE _MMIO(0x2520)
+
+#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+
+#define DRAW_WATERMARK _MMIO(0x26c0)
+#define VERT_WM_VAL REG_GENMASK(9, 0)
+
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
+#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
+
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
+
+#define GAM_ECOCHK _MMIO(0x4090)
+#define BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define ECOCHK_SNB_BIT (1 << 10)
+#define ECOCHK_DIS_TLB (1 << 8)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6)
+#define ECOCHK_PPGTT_CACHE64B (0x3 << 3)
+#define ECOCHK_PPGTT_CACHE4B (0x0 << 3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3)
+#define ECOCHK_PPGTT_UC_HSW (0x1 << 3)
+#define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
+#define ECOCHK_PPGTT_WB_HSW (0x3 << 3)
+
+#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define _RING_FAULT_REG_RCS 0x4094
+#define _RING_FAULT_REG_VCS 0x4194
+#define _RING_FAULT_REG_BCS 0x4294
+#define _RING_FAULT_REG_VECS 0x4394
+#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
+ _RING_FAULT_REG_RCS, \
+ _RING_FAULT_REG_VCS, \
+ _RING_FAULT_REG_VECS, \
+ _RING_FAULT_REG_BCS))
+
+#define ERROR_GEN6 _MMIO(0x40a0)
+
+#define DONE_REG _MMIO(0x40b0)
+#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
+#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
+
+#define GEN12_CCS_AUX_INV _MMIO(0x4208)
+#define GEN12_VD0_AUX_INV _MMIO(0x4218)
+#define GEN12_VE0_AUX_INV _MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV _MMIO(0x4248)
+
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
+#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
+
+#define GEN12_VD2_AUX_INV _MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV _MMIO(0x42c8)
+#define AUX_INV REG_BIT(0)
+
+#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
+
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
+#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
+
+#define XEHP_TILE0_ADDR_RANGE _MMIO(0x4900)
+#define XEHP_TILE_LMEM_RANGE_SHIFT 8
+
+#define XEHP_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
+#define XEHP_CCS_BASE_SHIFT 8
+
+#define GAMTARBMODE _MMIO(0x4a08)
+#define ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define ARB_MODE_SWIZZLE_BDW (1 << 1)
+
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18)
+
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+
+#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
+#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+
+#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80)
+#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0)
+#define GEN11_HASH_CTRL_BIT0 (1 << 0)
+#define GEN11_HASH_CTRL_BIT4 (1 << 12)
+
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+
+#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
+#define MMCD_PCLA (1 << 31)
+#define MMCD_HOTSPOT_EN (1 << 27)
+
+/* There are 4 64-bit counter registers, one for each stream output */
+#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8)
+#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4)
+
+#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
+#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
+
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+
+#define CHICKEN_RASTER_1 _MMIO(0x6204)
+#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+
+#define CHICKEN_RASTER_2 _MMIO(0x6208)
+#define TBIMR_FAST_CLIP REG_BIT(5)
+
+#define VFLSKPD _MMIO(0x62a8)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
+
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
+#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
+
+#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
+#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6)
+#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6)
+#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1)
+
+#define GEN7_GT_MODE _MMIO(0x7008)
+#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
+#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
+#define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
+#define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+#define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13)
+#define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12)
+#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
+#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+
+#define HIZ_CHICKEN _MMIO(0x7018)
+#define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
+#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
+#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
+
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+#define GEN8_ERRDETBCTRL (1 << 9)
+
+#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
+
+/* GEN8 chicken */
+#define HDC_CHICKEN0 _MMIO(0x7300)
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15)
+#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14)
+#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11)
+#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5)
+#define HDC_FORCE_NON_COHERENT (1 << 4)
+#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+
+#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
+
+#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+
+/* GEN9 chicken */
+#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
+#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
+#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
+
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+
+#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
+#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
+#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
+ ((slice) % 3) * 0x4)
+#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2))
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
+
+#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
+#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
+#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
+#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
+#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
+#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
+#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
+#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
+#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
+#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
+#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
+
+#define VF_PREEMPTION _MMIO(0x83a4)
+#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
+
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
+#define GEN12_SQCM _MMIO(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
+#define HSW_IDICR _MMIO(0x9008)
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+
+#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3 << 21)
+#define GEN6_MBC_SNPCR_MAX (0 << 21)
+#define GEN6_MBC_SNPCR_MED (1 << 21)
+#define GEN6_MBC_SNPCR_LOW (2 << 21)
+#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */
+
+#define VLV_G3DCTL _MMIO(0x9024)
+#define VLV_GSCKGCTL _MMIO(0x9028)
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11)
+
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN REG_BIT(30)
+
+#define GEN6_MBCTL _MMIO(0x907c)
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
+/* Fuse readout registers for GT */
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT 4
+#define GEN10_L3BANK_MASK 0x0F
+/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+#define GEN12_MAX_MSLICES 4
+#define GEN12_MEML3_EN_MASK 0x0F
+
+#define HSW_PAVP_FUSE1 _MMIO(0x911c)
+#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
+#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
+#define HSW_F1_EU_DIS_10EUS 0
+#define HSW_F1_EU_DIS_8EUS 1
+#define HSW_F1_EU_DIS_6EUS 2
+
+#define GEN8_FUSE2 _MMIO(0x9120)
+#define GEN8_F2_SS_DIS_SHIFT 21
+#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
+#define GEN8_F2_S_ENA_SHIFT 25
+#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
+#define GEN9_F2_SS_DIS_SHIFT 20
+#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_SHIFT 22
+#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
+#define GEN10_F2_SS_DIS_SHIFT 18
+#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+
+#define GEN8_EU_DISABLE0 _MMIO(0x9134)
+#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
+#define GEN11_EU_DISABLE _MMIO(0x9134)
+#define GEN8_EU_DIS0_S0_MASK 0xffffff
+#define GEN8_EU_DIS0_S1_SHIFT 24
+#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
+#define GEN11_EU_DIS_MASK 0xFF
+#define XEHP_EU_ENABLE _MMIO(0x9134)
+#define XEHP_EU_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE1 _MMIO(0x9138)
+#define GEN8_EU_DIS1_S1_MASK 0xffff
+#define GEN8_EU_DIS1_S2_SHIFT 16
+#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
+
+#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
+#define GEN11_GT_S_ENA_MASK 0xFF
+
+#define GEN8_EU_DISABLE2 _MMIO(0x913c)
+#define GEN8_EU_DIS2_S2_MASK 0xff
+
+#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c)
+#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c)
+
+#define GEN10_EU_DISABLE3 _MMIO(0x9140)
+#define GEN10_EU_DIS_SS_MASK 0xff
+#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
+#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
+#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
+
+#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
+#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT _MMIO(0x9148)
+
+#define GEN6_UCGCTL1 _MMIO(0x9400)
+#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
+#define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+#define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
+
+#define GEN6_UCGCTL2 _MMIO(0x9404)
+#define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+#define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+#define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define GEN6_UCGCTL3 _MMIO(0x9408)
+#define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
+
+#define GEN7_UCGCTL4 _MMIO(0x940c)
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14)
+
+#define GEN6_RCGCTL1 _MMIO(0x9410)
+#define GEN6_RCGCTL2 _MMIO(0x9414)
+
+#define GEN6_GDRST _MMIO(0x941c)
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+#define GEN6_GRDOM_VECS (1 << 4)
+#define GEN9_GRDOM_GUC (1 << 5)
+#define GEN8_GRDOM_MEDIA2 (1 << 7)
+/* GEN11 changed all bit defs except for FULL & RENDER */
+#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL
+#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER
+#define XEHPC_GRDOM_BLT8 REG_BIT(31)
+#define XEHPC_GRDOM_BLT7 REG_BIT(30)
+#define XEHPC_GRDOM_BLT6 REG_BIT(29)
+#define XEHPC_GRDOM_BLT5 REG_BIT(28)
+#define XEHPC_GRDOM_BLT4 REG_BIT(27)
+#define XEHPC_GRDOM_BLT3 REG_BIT(26)
+#define XEHPC_GRDOM_BLT2 REG_BIT(25)
+#define XEHPC_GRDOM_BLT1 REG_BIT(24)
+#define GEN11_GRDOM_SFC3 REG_BIT(20)
+#define GEN11_GRDOM_SFC2 REG_BIT(19)
+#define GEN11_GRDOM_SFC1 REG_BIT(18)
+#define GEN11_GRDOM_SFC0 REG_BIT(17)
+#define GEN11_GRDOM_VECS4 REG_BIT(16)
+#define GEN11_GRDOM_VECS3 REG_BIT(15)
+#define GEN11_GRDOM_VECS2 REG_BIT(14)
+#define GEN11_GRDOM_VECS REG_BIT(13)
+#define GEN11_GRDOM_MEDIA8 REG_BIT(12)
+#define GEN11_GRDOM_MEDIA7 REG_BIT(11)
+#define GEN11_GRDOM_MEDIA6 REG_BIT(10)
+#define GEN11_GRDOM_MEDIA5 REG_BIT(9)
+#define GEN11_GRDOM_MEDIA4 REG_BIT(8)
+#define GEN11_GRDOM_MEDIA3 REG_BIT(7)
+#define GEN11_GRDOM_MEDIA2 REG_BIT(6)
+#define GEN11_GRDOM_MEDIA REG_BIT(5)
+#define GEN11_GRDOM_GUC REG_BIT(3)
+#define GEN11_GRDOM_BLT REG_BIT(2)
+#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN6_RSTCTL _MMIO(0x9420)
+
+#define GEN7_MISCCPCTL _MMIO(0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
+#define GEN12_DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
+#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6)
+
+#define GEN8_UCGCTL6 _MMIO(0x9430)
+#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24)
+#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
+
+#define UNSLCGCTL9430 _MMIO(0x9430)
+#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLCGCTL9440 _MMIO(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 _MMIO(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
+#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
+#define SARBUNIT_CLKGATE_DIS (1 << 5)
+#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
+
+#define SCCGCTL94DC _MMIO(0x94dc)
+#define CG3DDISURB REG_BIT(14)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
+#define SSMCGCTL9530 _MMIO(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
+#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550)
+#define DFR_DISABLE (1 << 9)
+
+#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
+#define CGPSF_CLKGATE_DIS (1 << 3)
+
+#define MICRO_BP0_0 _MMIO(0x9800)
+#define MICRO_BP0_2 _MMIO(0x9804)
+#define MICRO_BP0_1 _MMIO(0x9808)
+#define MICRO_BP1_0 _MMIO(0x980c)
+#define MICRO_BP1_2 _MMIO(0x9810)
+#define MICRO_BP1_1 _MMIO(0x9814)
+#define MICRO_BP2_0 _MMIO(0x9818)
+#define MICRO_BP2_2 _MMIO(0x981c)
+#define MICRO_BP2_1 _MMIO(0x9820)
+#define MICRO_BP3_0 _MMIO(0x9824)
+#define MICRO_BP3_2 _MMIO(0x9828)
+#define MICRO_BP3_1 _MMIO(0x982c)
+#define MICRO_BP_TRIGGER _MMIO(0x9830)
+#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
+#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
+#define MICRO_BP_FIRED_ARMED _MMIO(0x983c)
+
+#define GEN6_GFXPAUSE _MMIO(0xa000)
+#define GEN6_RPNSWREQ _MMIO(0xa008)
+#define GEN6_TURBO_DISABLE (1 << 31)
+#define GEN6_FREQUENCY(x) ((x) << 25)
+#define HSW_FREQUENCY(x) ((x) << 24)
+#define GEN9_FREQUENCY(x) ((x) << 23)
+#define GEN6_OFFSET(x) ((x) << 19)
+#define GEN6_AGGRESSIVE_TURBO (0 << 15)
+#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
+#define GEN9_IGNORE_SLICE_RATIO (0 << 0)
+#define GEN12_MEDIA_FREQ_RATIO REG_BIT(13)
+
+#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c)
+#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17)
+#define GEN6_RC_CTL_RC6_ENABLE (1 << 18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20)
+#define GEN6_RC_CTL_RC7_ENABLE (1 << 22)
+#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24)
+#define GEN7_RC_CTL_TO_MODE (1 << 28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27)
+#define GEN6_RC_CTL_HW_ENABLE (1 << 31)
+#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xa010)
+#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xa014)
+#define GEN6_RPSTAT1 _MMIO(0xa01c)
+#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
+#define GEN9_CAGF_SHIFT 23
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
+#define GEN6_RP_CONTROL _MMIO(0xa024)
+#define GEN6_RP_MEDIA_TURBO (1 << 11)
+#define GEN6_RP_MEDIA_MODE_MASK (3 << 9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9)
+#define GEN6_RP_MEDIA_HW_MODE (1 << 9)
+#define GEN6_RP_MEDIA_SW_MODE (0 << 9)
+#define GEN6_RP_MEDIA_IS_GFX (1 << 8)
+#define GEN6_RP_ENABLE (1 << 7)
+#define GEN6_RP_UP_IDLE_MIN (0x1 << 3)
+#define GEN6_RP_UP_BUSY_AVG (0x2 << 3)
+#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
+#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
+#define GEN6_RPSWCTL_SHIFT 9
+#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
+#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
+#define GEN6_RP_UP_THRESHOLD _MMIO(0xa02c)
+#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xa030)
+#define GEN6_RP_CUR_UP_EI _MMIO(0xa050)
+#define GEN6_RP_EI_MASK 0xffffff
+#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK
+#define GEN6_RP_CUR_UP _MMIO(0xa054)
+#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK
+#define GEN6_RP_PREV_UP _MMIO(0xa058)
+#define GEN6_RP_CUR_DOWN_EI _MMIO(0xa05c)
+#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK
+#define GEN6_RP_CUR_DOWN _MMIO(0xa060)
+#define GEN6_RP_PREV_DOWN _MMIO(0xa064)
+#define GEN6_RP_UP_EI _MMIO(0xa068)
+#define GEN6_RP_DOWN_EI _MMIO(0xa06c)
+#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xa070)
+#define GEN6_RPDEUHWTC _MMIO(0xa080)
+#define GEN6_RPDEUC _MMIO(0xa084)
+#define GEN6_RPDEUCSW _MMIO(0xa088)
+#define GEN6_RC_CONTROL _MMIO(0xa090)
+#define GEN6_RC_STATE _MMIO(0xa094)
+#define RC_SW_TARGET_STATE_SHIFT 16
+#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
+#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xa098)
+#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xa09c)
+#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xa0a0)
+#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xa0a8)
+#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xa0ac)
+#define GEN6_RC_SLEEP _MMIO(0xa0b0)
+#define GEN6_RCUBMABDTMR _MMIO(0xa0b0)
+#define GEN6_RC1e_THRESHOLD _MMIO(0xa0b4)
+#define GEN6_RC6_THRESHOLD _MMIO(0xa0b8)
+#define GEN6_RC6p_THRESHOLD _MMIO(0xa0bc)
+#define VLV_RCEDATA _MMIO(0xa0bc)
+#define GEN6_RC6pp_THRESHOLD _MMIO(0xa0c0)
+#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xa0c4)
+#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xa0c8)
+
+#define GEN6_PMINTRMSK _MMIO(0xa168)
+#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31)
+#define ARAT_EXPIRED_INTRMSK (1 << 9)
+
+#define GEN8_MISC_CTRL0 _MMIO(0xa180)
+
+#define ECOBUS _MMIO(0xa180)
+#define FORCEWAKE_MT_ENABLE (1 << 5)
+
+#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
+#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
+#define FORCEWAKE _MMIO(0xa18c)
+
+#define VLV_SPAREG2H _MMIO(0xa194)
+
+#define GEN9_PG_ENABLE _MMIO(0xa210)
+#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
+#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
+#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
+#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
+#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
+
+#define GEN8_PUSHBUS_CONTROL _MMIO(0xa248)
+#define GEN8_PUSHBUS_ENABLE _MMIO(0xa250)
+#define GEN8_PUSHBUS_SHIFT _MMIO(0xa25c)
+
+/* GPM unit config (Gen9+) */
+#define CTC_MODE _MMIO(0xa26c)
+#define CTC_SOURCE_PARAMETER_MASK 1
+#define CTC_SOURCE_CRYSTAL_CLOCK 0
+#define CTC_SOURCE_DIVIDE_LOGIC 1
+#define CTC_SHIFT_PARAMETER_SHIFT 1
+#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+
+/* GPM MSG_IDLE */
+#define MSG_IDLE_CS _MMIO(0x8000)
+#define MSG_IDLE_VCS0 _MMIO(0x8004)
+#define MSG_IDLE_VCS1 _MMIO(0x8008)
+#define MSG_IDLE_BCS _MMIO(0x800C)
+#define MSG_IDLE_VECS0 _MMIO(0x8010)
+#define MSG_IDLE_VCS2 _MMIO(0x80C0)
+#define MSG_IDLE_VCS3 _MMIO(0x80C4)
+#define MSG_IDLE_VCS4 _MMIO(0x80C8)
+#define MSG_IDLE_VCS5 _MMIO(0x80CC)
+#define MSG_IDLE_VCS6 _MMIO(0x80D0)
+#define MSG_IDLE_VCS7 _MMIO(0x80D4)
+#define MSG_IDLE_VECS1 _MMIO(0x80D8)
+#define MSG_IDLE_VECS2 _MMIO(0x80DC)
+#define MSG_IDLE_VECS3 _MMIO(0x80E0)
+#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9)
+#define MSG_IDLE_FW_SHIFT 9
+
+#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
+#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
+
+#define VLV_PWRDWNUPCTL _MMIO(0xa294)
+
+#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xa2a0)
+#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
+#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+
+#define MISC_STATUS0 _MMIO(0xa500)
+#define MISC_STATUS1 _MMIO(0xa504)
+
+#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
+
+#define CHV_POWER_SS0_SIG1 _MMIO(0xa720)
+#define CHV_POWER_SS0_SIG2 _MMIO(0xa724)
+#define CHV_POWER_SS1_SIG1 _MMIO(0xa728)
+#define CHV_SS_PG_ENABLE (1 << 1)
+#define CHV_EU08_PG_ENABLE (1 << 9)
+#define CHV_EU19_PG_ENABLE (1 << 17)
+#define CHV_EU210_PG_ENABLE (1 << 25)
+#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c)
+#define CHV_EU311_PG_ENABLE (1 << 1)
+
+#define GEN7_SARCHKMD _MMIO(0xb000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
+#define GEN8_GARBCNTL _MMIO(0xb004)
+#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
+#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
+#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
+#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
+
+#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
+#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
+
+#define GEN7_L3SQCREG1 _MMIO(0xb010)
+#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+
+#define GEN7_L3CNTLREG1 _MMIO(0xb01c)
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
+#define GEN7_L3AGDIS (1 << 19)
+
+#define XEHPC_LNCFMISCCFGREG0 _MMIO(0xb01c)
+#define XEHPC_OVRLSCCC REG_BIT(0)
+
+#define GEN7_L3CNTLREG2 _MMIO(0xb020)
+
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
+#define GEN9_LNCFCMOCS_REG_COUNT 32
+
+#define GEN7_L3CNTLREG3 _MMIO(0xb024)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xb030)
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+#define GEN7_L3SQCREG4 _MMIO(0xb034)
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+
+#define HSW_SCRATCH1 _MMIO(0xb038)
+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27)
+
+#define GEN7_L3LOG(slice, i) _MMIO(0xb070 + (slice) * 0x200 + (i) * 4)
+#define GEN7_L3LOG_SIZE 0x80
+
+#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
+#define PMFLUSHDONE_LNICRSDROP (1 << 20)
+#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
+#define PMFLUSHDONE_LNEBLK (1 << 22)
+
+#define XEHP_L3NODEARBCFG _MMIO(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
+#define GEN8_L3SQCREG1 _MMIO(0xb100)
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
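+/*
+ * Illustration (not part of the upstream patch): a workaround routine might
+ * combine the credit macros above when updating GEN8_L3SQCREG1, roughly as
+ *
+ *	intel_uncore_rmw(uncore, GEN8_L3SQCREG1, L3_PRIO_CREDITS_MASK,
+ *			 L3_GENERAL_PRIO_CREDITS(62) |
+ *			 L3_HIGH_PRIO_CREDITS(2));
+ *
+ * The credit counts here are hypothetical; consult the per-platform
+ * workaround code for the values actually programmed.
+ */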
+
+#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xb114)
+#define GEN11_I2M_WRITE_DISABLE (1 << 28)
+
+#define GEN8_L3SQCREG4 _MMIO(0xb118)
+#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
+#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
+#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21)
+#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22)
+
+#define GEN9_SCRATCH1 _MMIO(0xb11c)
+#define EVICTION_PERF_FIX_ENABLE REG_BIT(8)
+
+#define BDW_SCRATCH1 _MMIO(0xb11c)
+#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
+
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
+#define GEN11_L3SQCREG5 _MMIO(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define MLTICTXCTL _MMIO(0xb170)
+#define TDONRENDER REG_BIT(2)
+
+#define XEHP_L3SCQREG7 _MMIO(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
+#define XEHPC_L3SCRUB _MMIO(0xb18c)
+#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
+#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
+#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
+
+#define L3SQCREG1_CCS0 _MMIO(0xb200)
+#define FLUSHALLNONCOH REG_BIT(5)
+
+#define GEN11_GLBLINVL _MMIO(0xb404)
+#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5)
+#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5)
+
+#define GEN11_LSN_UNSLCVC _MMIO(0xb43c)
+#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
+#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
+
+#define GUCPMTIMESTAMP _MMIO(0xc3e8)
+
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
+#define FAULT_VA_HIGH_BITS (0xf << 0)
+#define FAULT_GTT_SEL (1 << 4)
+
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
+#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
+#define RING_FAULT_GTTSEL_MASK (1 << 11)
+#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
+#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
+#define RING_FAULT_VALID (1 << 0)
+
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
+
+#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28)
+#define RENDER_MOD_CTRL _MMIO(0xcf2c)
+#define COMP_MOD_CTRL _MMIO(0xcf30)
+#define VDBX_MOD_CTRL _MMIO(0xcf34)
+#define VEBX_MOD_CTRL _MMIO(0xcf38)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
+#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+
+#define GEN12_GAM_DONE _MMIO(0xcf68)
+
+#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
+#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
+#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
+
+#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
+#define GEN7_ROW_INSTDONE _MMIO(0xe164)
+
+#define HALF_SLICE_CHICKEN2 _MMIO(0xe180)
+#define GEN8_ST_PO_DISABLE (1 << 13)
+
+#define HALF_SLICE_CHICKEN3 _MMIO(0xe184)
+#define HSW_SAMPLE_C_PERFORMANCE (1 << 9)
+#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
+#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
+#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1)
+
+#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188)
+#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5)
+#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
+
+#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
+#define ENABLE_SMALLPL REG_BIT(15)
+#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
+#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+
+#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8)
+#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2)
+
+#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
+#define ENABLE_EU_COUNT_FOR_TDL_FLUSH REG_BIT(10)
+#define DISABLE_ECC REG_BIT(5)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+
+#define EU_PERF_CNTL0 _MMIO(0xe458)
+#define EU_PERF_CNTL4 _MMIO(0xe45c)
+
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13)
+#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define THREAD_EX_ARB_MODE REG_GENMASK(3, 2)
+#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
+
+#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
+
+#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
+#define FLOW_CONTROL_ENABLE REG_BIT(15)
+#define UGM_BACKUP_MODE REG_BIT(13)
+#define MDQ_ARBITRATION_MODE REG_BIT(12)
+#define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10)
+#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8)
+#define STALL_DOP_GATING_DISABLE REG_BIT(5)
+#define THROTTLE_12_5 REG_GENMASK(4, 2)
+#define DISABLE_EARLY_EOT REG_BIT(1)
+
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+
+#define RT_CTRL _MMIO(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+#define STACKID_CTRL REG_GENMASK(6, 5)
+#define STACKID_CTRL_512 REG_FIELD_PREP(STACKID_CTRL, 0x2)
+
+#define EU_PERF_CNTL1 _MMIO(0xe558)
+#define EU_PERF_CNTL5 _MMIO(0xe55c)
+
+#define GEN12_HDC_CHICKEN0 _MMIO(0xe5f0)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+#define ICL_HDC_MODE _MMIO(0xe5f4)
+
+#define EU_PERF_CNTL2 _MMIO(0xe658)
+#define EU_PERF_CNTL6 _MMIO(0xe65c)
+#define EU_PERF_CNTL3 _MMIO(0xe758)
+
+#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
+#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
+#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
+#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
+#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
+#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32)
+#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32)
+#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
+#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
+
+#define SARB_CHICKEN1 _MMIO(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
+
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100)
+
+#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
+#define DOP_CLOCK_GATING_DISABLE (1 << 0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
+#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
+
+#define CRSTANDVID _MMIO(0x11100)
+#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE _MMIO(0x11110)
+#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 _MMIO(0x11114)
+#define VIDFREQ3 _MMIO(0x11118)
+#define VIDFREQ4 _MMIO(0x1111c)
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1 << 7)
+#define MEMCTL_TGT_VID_MASK 0x007f
+#define MEMIHYST _MMIO(0x1117c)
+#define MEMINTREN _MMIO(0x11180) /* 16 bits */
+#define MEMINT_RSEXIT_EN (1 << 8)
+#define MEMINT_CX_SUPR_EN (1 << 7)
+#define MEMINT_CONT_BUSY_EN (1 << 6)
+#define MEMINT_AVG_BUSY_EN (1 << 5)
+#define MEMINT_EVAL_CHG_EN (1 << 4)
+#define MEMINT_MON_IDLE_EN (1 << 3)
+#define MEMINT_UP_EVAL_EN (1 << 2)
+#define MEMINT_DOWN_EVAL_EN (1 << 1)
+#define MEMINT_SW_CMD_EN (1 << 0)
+#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS _MMIO(0x11184)
+#define MEMINT_RSEXIT (1 << 7)
+#define MEMINT_CONT_BUSY (1 << 6)
+#define MEMINT_AVG_BUSY (1 << 5)
+#define MEMINT_EVAL_CHG (1 << 4)
+#define MEMINT_MON_IDLE (1 << 3)
+#define MEMINT_UP_EVAL (1 << 2)
+#define MEMINT_DOWN_EVAL (1 << 1)
+#define MEMINT_SW_CMD (1 << 0)
+#define MEMMODECTL _MMIO(0x11190)
+#define MEMMODE_BOOST_EN (1 << 31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1 << 15)
+#define MEMMODE_SWMODE_EN (1 << 14)
+#define MEMMODE_RCLK_GATE (1 << 13)
+#define MEMMODE_HW_UPDATE (1 << 12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG _MMIO(0x1119c)
+#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1 << 12)
+#define SFCAVM (1 << 11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG _MMIO(0x111a0)
+#define RCBMINAVG _MMIO(0x111a0)
+#define RCUPEI _MMIO(0x111b0)
+#define RCDNEI _MMIO(0x111b4)
+#define RSTDBYCTL _MMIO(0x111b8)
+#define RS1EN (1 << 31)
+#define RS2EN (1 << 30)
+#define RS3EN (1 << 29)
+#define D3RS3EN (1 << 28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */
+#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7 << 20)
+#define RSX_STATUS_ON (0 << 20)
+#define RSX_STATUS_RC1 (1 << 20)
+#define RSX_STATUS_RC1E (2 << 20)
+#define RSX_STATUS_RS1 (3 << 20)
+#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7 << 20)
+#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */
+#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1 << 17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3 << 14)
+#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1 << 14)
+#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3 << 12)
+#define SLOW_RS123 (0 << 12)
+#define SLOW_RS23 (1 << 12)
+#define SLOW_RS3 (2 << 12)
+#define NORMAL_RS123 (3 << 12)
+#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */
+#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3 << 4)
+#define RS_CSTATE_C367_RS1 (0 << 4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define RS_CSTATE_RSVD (2 << 4)
+#define RS_CSTATE_C367_RS2 (3 << 4)
+#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */
+#define VIDCTL _MMIO(0x111c0)
+#define VIDSTS _MMIO(0x111c8)
+#define VIDSTART _MMIO(0x111cc) /* 8 bits */
+#define MEMSTAT_ILK _MMIO(0x111f8)
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1 << 2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define PMMISC _MMIO(0x11214)
+#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */
+#define SDEW _MMIO(0x1124c)
+#define CSIEW0 _MMIO(0x11250)
+#define CSIEW1 _MMIO(0x11254)
+#define CSIEW2 _MMIO(0x11258)
+#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */
+#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */
+#define MCHAFE _MMIO(0x112c0)
+#define CSIEC _MMIO(0x112e0)
+#define DMIEC _MMIO(0x112e4)
+#define DDREC _MMIO(0x112e8)
+#define PEG0EC _MMIO(0x112ec)
+#define PEG1EC _MMIO(0x112f0)
+#define GFXEC _MMIO(0x112f4)
+#define INTTOEXT_BASE_ILK _MMIO(0x11300)
+#define RPPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTUPAVG _MMIO(0x113b8)
+#define RCPREVBSYTDNAVG _MMIO(0x113bc)
+#define RPPREVBSYTDNAVG _MMIO(0x113bc)
+#define ECR _MMIO(0x11600)
+#define ECR_GPFE (1 << 31)
+#define ECR_IMONE (1 << 30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 _MMIO(0x11608)
+#define OGW1 _MMIO(0x1160c)
+#define EG0 _MMIO(0x11610)
+#define EG1 _MMIO(0x11614)
+#define EG2 _MMIO(0x11618)
+#define EG3 _MMIO(0x1161c)
+#define EG4 _MMIO(0x11620)
+#define EG5 _MMIO(0x11624)
+#define EG6 _MMIO(0x11628)
+#define EG7 _MMIO(0x1162c)
+#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */
+#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */
+#define LCFUSE02 _MMIO(0x116c0)
+#define LCFUSE_HIV_MASK 0x000000ff
+
+#define GAC_ECO_BITS _MMIO(0x14090)
+#define ECOBITS_SNB_BIT (1 << 13)
+#define ECOBITS_PPGTT_CACHE64B (3 << 8)
+#define ECOBITS_PPGTT_CACHE4B (0 << 8)
+
+#define GEN12_RCU_MODE _MMIO(0x14800)
+#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+
+#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
+#define CHV_FGT_DISABLE_SS0 (1 << 10)
+#define CHV_FGT_DISABLE_SS1 (1 << 11)
+#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
+#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
+#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
+#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
+#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
+#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
+
+#define BCS_SWCTRL _MMIO(0x22200)
+#define BCS_SRC_Y REG_BIT(0)
+#define BCS_DST_Y REG_BIT(1)
+
+#define GAB_CTL _MMIO(0x24000)
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
+
+#define GEN6_PMISR _MMIO(0x44020)
+#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */
+#define GEN6_PMIIR _MMIO(0x44028)
+#define GEN6_PMIER _MMIO(0x4402c)
+#define GEN6_PM_MBOX_EVENT (1 << 25)
+#define GEN6_PM_THERMAL_EVENT (1 << 24)
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
+#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
+#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
+ GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_EI_EXPIRED | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4)
+#define GEN7_GT_SCRATCH_REG_NUM 8
+
+#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
+#define GFX_FLSH_CNTL_EN (1 << 0)
+
+#define GTFIFODBG _MMIO(0x120000)
+#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
+#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
+#define GT_FIFO_SBDROPERR (1 << 6)
+#define GT_FIFO_BLOBDROPERR (1 << 5)
+#define GT_FIFO_SB_READ_ABORTERR (1 << 4)
+#define GT_FIFO_DROPERR (1 << 3)
+#define GT_FIFO_OVFERR (1 << 2)
+#define GT_FIFO_IAWRERR (1 << 1)
+#define GT_FIFO_IARDERR (1 << 0)
+
+#define GTFIFOCTL _MMIO(0x120008)
+#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
+#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
+
+#define FORCEWAKE_MT_ACK _MMIO(0x130040)
+#define FORCEWAKE_ACK_HSW _MMIO(0x130044)
+#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
+#define FORCEWAKE_ACK _MMIO(0x130090)
+#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090)
+#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
+#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
+#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
+#define VLV_GTLC_PW_STATUS _MMIO(0x130094)
+#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
+#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
+#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
+#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098)
+#define VLV_GFX_CLK_STATUS_BIT (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2)
+#define FORCEWAKE_VLV _MMIO(0x1300b0)
+#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
+#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8)
+#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc)
+
+#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+
+#define GEN6_GT_CORE_STATUS _MMIO(0x138060)
+#define GEN6_CORE_CPD_STATE_MASK (7 << 4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define GEN8_GT_SLICE_INFO _MMIO(0x138064)
+#define GEN8_LSLICESTAT_MASK 0x7
+
+#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104)
+#define VLV_COUNTER_CONTROL _MMIO(0x138104)
+#define VLV_COUNT_RANGE_HIGH (1 << 15)
+#define VLV_MEDIA_RC0_COUNT_EN (1 << 5)
+#define VLV_RENDER_RC0_COUNT_EN (1 << 4)
+#define VLV_MEDIA_RC6_COUNT_EN (1 << 1)
+#define VLV_RENDER_RC6_COUNT_EN (1 << 0)
+#define GEN6_GT_GFX_RC6 _MMIO(0x138108)
+#define VLV_GT_MEDIA_RC6 _MMIO(0x13810c)
+
+#define GEN6_GT_GFX_RC6p _MMIO(0x13810c)
+#define GEN6_GT_GFX_RC6pp _MMIO(0x138110)
+#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
+#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
+#define GEN11_CSME (31)
+#define GEN11_GUNIT (28)
+#define GEN11_GUC (25)
+#define GEN11_WDPERF (20)
+#define GEN11_KCR (19)
+#define GEN11_GTPM (16)
+#define GEN11_BCS (15)
+#define XEHPC_BCS1 (14)
+#define XEHPC_BCS2 (13)
+#define XEHPC_BCS3 (12)
+#define XEHPC_BCS4 (11)
+#define XEHPC_BCS5 (10)
+#define XEHPC_BCS6 (9)
+#define XEHPC_BCS7 (8)
+#define XEHPC_BCS8 (23)
+#define GEN12_CCS3 (7)
+#define GEN12_CCS2 (6)
+#define GEN12_CCS1 (5)
+#define GEN12_CCS0 (4)
+#define GEN11_RCS0 (0)
+#define GEN11_VECS(x) (31 - (x))
+#define GEN11_VCS(x) (x)
+
+#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
+#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
+#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
+#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
+#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
+#define GEN12_CCS_RSVD_INTR_ENABLE _MMIO(0x190048)
+
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
+#define GEN11_INTR_DATA_VALID (1 << 31)
+#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
+#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
+#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
+#define OTHER_KCR_INSTANCE 4
+#define OTHER_GSC_INSTANCE 6
+#define OTHER_MEDIA_GUC_INSTANCE 16
+#define OTHER_MEDIA_GTPM_INSTANCE 17
+
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
+
+#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
+#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
+#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
+#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
+#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0)
+#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
+#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
+#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
+#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
+#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
+#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
+#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+#define GEN12_CCS0_CCS1_INTR_MASK _MMIO(0x190100)
+#define GEN12_CCS2_CCS3_INTR_MASK _MMIO(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK _MMIO(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK _MMIO(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK _MMIO(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK _MMIO(0x19011c)
+
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+
+/*
+ * Standalone Media's non-engine GT registers are located at their regular GT
+ * offsets plus 0x380000. This extra offset is stored inside the intel_uncore
+ * structure so that the existing code can be used for both GTs without
+ * modification.
+ */
+#define MTL_MEDIA_GSI_BASE 0x380000
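+/*
+ * Illustration (not part of the upstream patch): because the offset is
+ * applied inside the uncore accessors, a read such as
+ *
+ *	intel_uncore_read(media_gt->uncore, GEN12_SFC_DONE(0));
+ *
+ * lands on MMIO offset 0x1cc000 + 0x380000 on the standalone media GT while
+ * the register definition itself stays unchanged. (media_gt is an
+ * illustrative pointer to the media GT's intel_gt here.)
+ */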
+
+#endif /* __INTEL_GT_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000..1dfd01668
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/workqueue.h>
+
+#include "i915_drv.h" /* for_each_engine() */
+#include "i915_request.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_execlists_submission.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static bool retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ return false;
+
+ /* And check nothing new was submitted */
+ return !i915_active_fence_isset(&tl->last_request);
+}
+
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+ return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
+static bool flush_submission(struct intel_gt *gt, long timeout)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool active = false;
+
+ if (!timeout)
+ return false;
+
+ if (!intel_gt_pm_is_awake(gt))
+ return false;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_flush_submission(engine);
+
+ /* Flush the background retirement and idle barriers */
+ flush_work(&engine->retire_work);
+ flush_delayed_work(&engine->wakeref.work);
+
+ /* Is the idle barrier still outstanding? */
+ active |= engine_active(engine);
+ }
+
+ return active;
+}
+
+static void engine_retire(struct work_struct *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), retire_work);
+ struct intel_timeline *tl = xchg(&engine->retire, NULL);
+
+ do {
+ struct intel_timeline *next = xchg(&tl->retire, NULL);
+
+ /*
+ * Our goal here is to retire _idle_ timelines as soon as
+ * possible (as they are idle, we do not expect userspace
+ * to be cleaning up anytime soon).
+ *
+ * If the timeline is currently locked, either it is being
+ * retired elsewhere or about to be!
+ */
+ if (mutex_trylock(&tl->mutex)) {
+ retire_requests(tl);
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+
+ GEM_BUG_ON(!next);
+ tl = ptr_mask_bits(next, 1);
+ } while (tl);
+}
+
+static bool add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+#define STUB ((struct intel_timeline *)1)
+ struct intel_timeline *first;
+
+ /*
+ * We open-code a llist here to include the additional tag [BIT(0)]
+ * so that we know when the timeline is already on a
+ * retirement queue: either this engine or another.
+ */
+
+ if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
+ return false;
+
+ intel_timeline_get(tl);
+ first = READ_ONCE(engine->retire);
+ do
+ tl->retire = ptr_pack_bits(first, 1, 1);
+ while (!try_cmpxchg(&engine->retire, &first, tl));
+
+ return !first;
+}
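+
+/*
+ * Illustration (not part of the upstream patch) of the BIT(0) tag used in
+ * add_retire() above: the low bit of tl->retire marks the timeline as
+ * already queued, while the remaining bits hold the next entry of the
+ * open-coded llist (next_tl is an illustrative local name):
+ *
+ *	tl->retire = ptr_pack_bits(next_tl, 1, 1);	(sets tag bit 0)
+ *	next_tl = ptr_mask_bits(tl->retire, 1);		(strips tag bit 0)
+ */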
+
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl)
+{
+ /* We don't deal well with the engine disappearing beneath us */
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+
+ if (add_retire(engine, tl))
+ schedule_work(&engine->retire_work);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine)
+{
+ INIT_WORK(&engine->retire_work, engine_retire);
+}
+
+void intel_engine_fini_retire(struct intel_engine_cs *engine)
+{
+ flush_work(&engine->retire_work);
+ GEM_BUG_ON(engine->retire);
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+ long *remaining_timeout)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long active_count = 0;
+ LIST_HEAD(free);
+
+ flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++; /* report busy to caller, try again? */
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ if (timeout > 0) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ mutex_unlock(&tl->mutex);
+
+ timeout = dma_fence_wait_timeout(fence,
+ true,
+ timeout);
+ dma_fence_put(fence);
+
+ /* Retirement is best effort */
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++;
+ goto out_active;
+ }
+ }
+ }
+
+ if (!retire_requests(tl))
+ active_count++;
+ mutex_unlock(&tl->mutex);
+
+out_active: spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+
+ if (flush_submission(gt, timeout)) /* Wait, there's more! */
+ active_count++;
+
+ if (remaining_timeout)
+ *remaining_timeout = timeout;
+
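+ /*
+ * Return 0 once every timeline is idle, the remaining timeout if some
+ * are still busy, or -ETIME if the timeout expired with work
+ * outstanding.
+ */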
+ return active_count ? timeout ?: -ETIME : 0;
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), requests.retire_work.work);
+
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+ intel_gt_retire_requests(gt);
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+ INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+ cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_fini_requests(struct intel_gt *gt)
+{
+ /* Wait until the work is marked as finished before unloading! */
+ cancel_delayed_work_sync(&gt->requests.retire_work);
+
+ flush_work(&gt->watchdog.work);
+}
+
+void intel_gt_watchdog_work(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), watchdog.work);
+ struct i915_request *rq, *rn;
+ struct llist_node *first;
+
+ first = llist_del_all(&gt->watchdog.list);
+ if (!first)
+ return;
+
+ llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
+ if (!i915_request_completed(rq)) {
+ struct dma_fence *f = &rq->fence;
+
+ pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
+ f->ops->get_driver_name(f),
+ f->ops->get_timeline_name(f),
+ f->seqno);
+ i915_request_cancel(rq, -EINTR);
+ }
+ i915_request_put(rq);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000..d2969f68d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+#include <linux/stddef.h>
+
+struct intel_engine_cs;
+struct intel_gt;
+struct intel_timeline;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+ long *remaining_timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+ intel_gt_retire_requests_timeout(gt, 0, NULL);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine);
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+ struct intel_timeline *tl);
+void intel_engine_fini_retire(struct intel_engine_cs *engine);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+void intel_gt_fini_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
new file mode 100644
index 000000000..9486dd3be
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/printk.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_gt_types.h"
+#include "intel_rc6.h"
+
+bool is_object_gt(struct kobject *kobj)
+{
+ return !strncmp(kobj->name, "gt", 2);
+}
+
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct kobject *kobj,
+ const char *name)
+{
+ /*
+ * We need to know where the interface was called from: either
+ * from gt/ or from the parent directory, because the type of the
+ * private data depends on where the interface lives.
+ * If the interface is called from gt/, the private data is of
+ * type "struct intel_gt *", otherwise it is of type
+ * "struct drm_i915_private *".
+ */
+ if (!is_object_gt(kobj)) {
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ return to_gt(i915);
+ }
+
+ return kobj_to_gt(kobj);
+}
+
+static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
+{
+ return &gt->i915->drm.primary->kdev->kobj;
+}
+
+static ssize_t id_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+
+ return sysfs_emit(buf, "%u\n", gt->info.id);
+}
+static struct kobj_attribute attr_id = __ATTR_RO(id);
+
+static struct attribute *id_attrs[] = {
+ &attr_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(id);
+
+/* A kobject needs a release() method even if it does nothing */
+static void kobj_gt_release(struct kobject *kobj)
+{
+}
+
+static struct kobj_type kobj_gt_type = {
+ .release = kobj_gt_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = id_groups,
+};
+
+void intel_gt_sysfs_register(struct intel_gt *gt)
+{
+ /*
+ * For ABI compatibility, keep generating the files under the
+ * parent directory, where they were originally exposed.
+ *
+ * Generate them only for gt 0 to avoid duplicates.
+ */
+ if (gt_is_root(gt))
+ intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
+
+ /* init and xfer ownership to sysfs tree */
+ if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
+ gt->i915->sysfs_gt, "gt%d", gt->info.id))
+ goto exit_fail;
+
+ gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt);
+ if (!gt->sysfs_defaults)
+ goto exit_fail;
+
+ intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
+
+ return;
+
+exit_fail:
+ kobject_put(&gt->sysfs_gt);
+ drm_warn(&gt->i915->drm,
+ "failed to initialize gt%d sysfs root\n", gt->info.id);
+}
+
+void intel_gt_sysfs_unregister(struct intel_gt *gt)
+{
+ kobject_put(gt->sysfs_defaults);
+ kobject_put(&gt->sysfs_gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
new file mode 100644
index 000000000..c3a123fae
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_H__
+#define __SYSFS_GT_H__
+
+#include <linux/ctype.h>
+#include <linux/kobject.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON() */
+#include "intel_gt_types.h"
+
+struct intel_gt;
+
+bool is_object_gt(struct kobject *kobj);
+
+struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
+
+struct kobject *
+intel_gt_create_kobj(struct intel_gt *gt,
+ struct kobject *dir,
+ const char *name);
+
+static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct intel_gt, sysfs_gt);
+}
+
+void intel_gt_sysfs_register(struct intel_gt *gt);
+void intel_gt_sysfs_unregister(struct intel_gt *gt);
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct kobject *kobj,
+ const char *name);
+
+#endif /* SYSFS_GT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
new file mode 100644
index 000000000..b108f0a8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/sysfs.h>
+#include <linux/printk.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_pcode.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+
+enum intel_gt_sysfs_op {
+ INTEL_GT_SYSFS_MIN = 0,
+ INTEL_GT_SYSFS_MAX,
+};
+
+static int
+sysfs_gt_attribute_w_func(struct kobject *kobj, struct attribute *attr,
+ int (func)(struct intel_gt *gt, u32 val), u32 val)
+{
+ struct intel_gt *gt;
+ int ret;
+
+ if (!is_object_gt(kobj)) {
+ int i;
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ ret = func(gt, val);
+ if (ret)
+ break;
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
+ ret = func(gt, val);
+ }
+
+ return ret;
+}
+
+static u32
+sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
+ u32 (func)(struct intel_gt *gt),
+ enum intel_gt_sysfs_op op)
+{
+ struct intel_gt *gt;
+ u32 ret;
+
+ ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
+
+ if (!is_object_gt(kobj)) {
+ int i;
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ u32 val = func(gt);
+
+ switch (op) {
+ case INTEL_GT_SYSFS_MIN:
+ if (val < ret)
+ ret = val;
+ break;
+
+ case INTEL_GT_SYSFS_MAX:
+ if (val > ret)
+ ret = val;
+ break;
+ }
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
+ ret = func(gt);
+ }
+
+ return ret;
+}
+
+/* RC6 interfaces will show the minimum RC6 residency value */
+#define sysfs_gt_attribute_r_min_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN)
+
+/* Frequency interfaces will show the maximum frequency value */
+#define sysfs_gt_attribute_r_max_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
+
+#define INTEL_GT_SYSFS_SHOW(_name, _attr_type) \
+ static ssize_t _name##_show_common(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+ { \
+ u32 val = sysfs_gt_attribute_r_##_attr_type##_func(kobj, attr, \
+ __##_name##_show); \
+ \
+ return sysfs_emit(buff, "%u\n", val); \
+ } \
+ static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buff) \
+ { \
+ return _name ##_show_common(kobj, &attr->attr, buff); \
+ } \
+ static ssize_t _name##_dev_show(struct device *dev, \
+ struct device_attribute *attr, char *buff) \
+ { \
+ return _name##_show_common(&dev->kobj, &attr->attr, buff); \
+ }
+
+#define INTEL_GT_SYSFS_STORE(_name, _func) \
+ static ssize_t _name##_store_common(struct kobject *kobj, \
+ struct attribute *attr, \
+ const char *buff, size_t count) \
+ { \
+ int ret; \
+ u32 val; \
+ \
+ ret = kstrtou32(buff, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ret = sysfs_gt_attribute_w_func(kobj, attr, _func, val); \
+ \
+ return ret ?: count; \
+ } \
+ static ssize_t _name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buff, \
+ size_t count) \
+ { \
+ return _name##_store_common(kobj, &attr->attr, buff, count); \
+ } \
+ static ssize_t _name##_dev_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buff, size_t count) \
+ { \
+ return _name##_store_common(&dev->kobj, &attr->attr, buff, count); \
+ }
+
+#define INTEL_GT_SYSFS_SHOW_MAX(_name) INTEL_GT_SYSFS_SHOW(_name, max)
+#define INTEL_GT_SYSFS_SHOW_MIN(_name) INTEL_GT_SYSFS_SHOW(_name, min)
+
+#define INTEL_GT_ATTR_RW(_name) \
+ static struct kobj_attribute attr_##_name = __ATTR_RW(_name)
+
+#define INTEL_GT_ATTR_RO(_name) \
+ static struct kobj_attribute attr_##_name = __ATTR_RO(_name)
+
+#define INTEL_GT_DUAL_ATTR_RW(_name) \
+ static struct device_attribute dev_attr_##_name = __ATTR(_name, 0644, \
+ _name##_dev_show, \
+ _name##_dev_store); \
+ INTEL_GT_ATTR_RW(_name)
+
+#define INTEL_GT_DUAL_ATTR_RO(_name) \
+ static struct device_attribute dev_attr_##_name = __ATTR(_name, 0444, \
+ _name##_dev_show, \
+ NULL); \
+ INTEL_GT_ATTR_RO(_name)
+
+#ifdef CONFIG_PM
+static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
+{
+ intel_wakeref_t wakeref;
+ u64 res = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ res = intel_rc6_residency_us(&gt->rc6, reg);
+
+ return DIV_ROUND_CLOSEST_ULL(res, 1000);
+}
+
+static u8 get_rc6_mask(struct intel_gt *gt)
+{
+ u8 mask = 0;
+
+ if (HAS_RC6(gt->i915))
+ mask |= BIT(0);
+ if (HAS_RC6p(gt->i915))
+ mask |= BIT(1);
+ if (HAS_RC6pp(gt->i915))
+ mask |= BIT(2);
+
+ return mask;
+}
+
+static ssize_t rc6_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+
+ return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+}
+
+static ssize_t rc6_enable_dev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name);
+
+ return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+}
+
+static u32 __rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6);
+}
+
+static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6p);
+}
+
+static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6pp);
+}
+
+static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, VLV_GT_MEDIA_RC6);
+}
+
+INTEL_GT_SYSFS_SHOW_MIN(rc6_residency_ms);
+INTEL_GT_SYSFS_SHOW_MIN(rc6p_residency_ms);
+INTEL_GT_SYSFS_SHOW_MIN(rc6pp_residency_ms);
+INTEL_GT_SYSFS_SHOW_MIN(media_rc6_residency_ms);
+
+INTEL_GT_DUAL_ATTR_RO(rc6_enable);
+INTEL_GT_DUAL_ATTR_RO(rc6_residency_ms);
+INTEL_GT_DUAL_ATTR_RO(rc6p_residency_ms);
+INTEL_GT_DUAL_ATTR_RO(rc6pp_residency_ms);
+INTEL_GT_DUAL_ATTR_RO(media_rc6_residency_ms);
+
+static struct attribute *rc6_attrs[] = {
+ &attr_rc6_enable.attr,
+ &attr_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *rc6p_attrs[] = {
+ &attr_rc6p_residency_ms.attr,
+ &attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *media_rc6_attrs[] = {
+ &attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *rc6_dev_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *rc6p_dev_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *media_rc6_dev_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static const struct attribute_group rc6_attr_group[] = {
+ { .attrs = rc6_attrs, },
+ { .name = power_group_name, .attrs = rc6_dev_attrs, },
+};
+
+static const struct attribute_group rc6p_attr_group[] = {
+ { .attrs = rc6p_attrs, },
+ { .name = power_group_name, .attrs = rc6p_dev_attrs, },
+};
+
+static const struct attribute_group media_rc6_attr_group[] = {
+ { .attrs = media_rc6_attrs, },
+ { .name = power_group_name, .attrs = media_rc6_dev_attrs, },
+};
+
+static int __intel_gt_sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return is_object_gt(kobj) ?
+ sysfs_create_group(kobj, &grp[0]) :
+ sysfs_merge_group(kobj, &grp[1]);
+}
+
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ if (!HAS_RC6(gt->i915))
+ return;
+
+ ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+
+ /*
+ * We cannot use the is_visible() callback here because the
+ * upper object inherits the attributes from the parent group.
+ */
+ if (HAS_RC6p(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6p sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create media %u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+}
+#else
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+}
+#endif /* CONFIG_PM */
+
+static u32 __act_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_read_actual_frequency(&gt->rps);
+}
+
+static u32 __cur_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_requested_frequency(&gt->rps);
+}
+
+static u32 __boost_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_boost_frequency(&gt->rps);
+}
+
+static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_boost_frequency(&gt->rps, val);
+}
+
+static u32 __RP0_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp0_frequency(&gt->rps);
+}
+
+static u32 __RPn_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rpn_frequency(&gt->rps);
+}
+
+static u32 __RP1_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp1_frequency(&gt->rps);
+}
+
+static u32 __max_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_max_frequency(&gt->rps);
+}
+
+static int __set_max_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_max_frequency(&gt->rps, val);
+}
+
+static u32 __min_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_min_frequency(&gt->rps);
+}
+
+static int __set_min_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_min_frequency(&gt->rps, val);
+}
+
+static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
+{
+ struct intel_rps *rps = &gt->rps;
+
+ return intel_gpu_freq(rps, rps->efficient_freq);
+}
+
+INTEL_GT_SYSFS_SHOW_MAX(act_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(boost_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(cur_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(RP0_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(RP1_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(RPn_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(max_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MIN(min_freq_mhz);
+INTEL_GT_SYSFS_SHOW_MAX(vlv_rpe_freq_mhz);
+INTEL_GT_SYSFS_STORE(boost_freq_mhz, __boost_freq_mhz_store);
+INTEL_GT_SYSFS_STORE(max_freq_mhz, __set_max_freq);
+INTEL_GT_SYSFS_STORE(min_freq_mhz, __set_min_freq);
+
+#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store, _show_dev, _store_dev) \
+ static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, \
+ _show_dev, _store_dev); \
+ static struct kobj_attribute attr_rps_##_name = __ATTR(rps_##_name, _mode, \
+ _show, _store)
+
+#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL, \
+ _name##_dev_show, NULL)
+#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store, \
+ _name##_dev_show, _name##_dev_store)
+
+/* The below macros generate static structures */
+INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
+INTEL_GT_RPS_SYSFS_ATTR_RO(vlv_rpe_freq_mhz);
+
+#define GEN6_ATTR(p, s) { \
+ &p##attr_##s##_act_freq_mhz.attr, \
+ &p##attr_##s##_cur_freq_mhz.attr, \
+ &p##attr_##s##_boost_freq_mhz.attr, \
+ &p##attr_##s##_max_freq_mhz.attr, \
+ &p##attr_##s##_min_freq_mhz.attr, \
+ &p##attr_##s##_RP0_freq_mhz.attr, \
+ &p##attr_##s##_RP1_freq_mhz.attr, \
+ &p##attr_##s##_RPn_freq_mhz.attr, \
+ NULL, \
+ }
+
+#define GEN6_RPS_ATTR GEN6_ATTR(, rps)
+#define GEN6_GT_ATTR GEN6_ATTR(dev_, gt)
+
+static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
+static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR;
+
+static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
+
+ return sysfs_emit(buff, "%u\n", preq);
+}
+
+struct intel_gt_bool_throttle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ i915_reg_t reg32;
+ u32 mask;
+};
+
+static ssize_t throttle_reason_bool_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_gt_bool_throttle_attr *t_attr =
+ (struct intel_gt_bool_throttle_attr *) attr;
+ bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask);
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \
+struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
+ .attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \
+ .show = throttle_reason_bool_show, \
+ .reg32 = GT0_PERF_LIMIT_REASONS, \
+ .mask = mask__, \
+}
+
+INTEL_GT_ATTR_RO(punit_req_freq_mhz);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
+
+static const struct attribute *throttle_reason_attrs[] = {
+ &attr_throttle_reason_status.attr,
+ &attr_throttle_reason_pl1.attr,
+ &attr_throttle_reason_pl2.attr,
+ &attr_throttle_reason_pl4.attr,
+ &attr_throttle_reason_thermal.attr,
+ &attr_throttle_reason_prochot.attr,
+ &attr_throttle_reason_ratl.attr,
+ &attr_throttle_reason_vr_thermalert.attr,
+ &attr_throttle_reason_vr_tdc.attr,
+ NULL
+};
+
+/*
+ * Scaling for multipliers (aka frequency factors).
+ * The format of the value in the register is u8.8.
+ *
+ * The presentation to userspace is inspired by the perf event framework.
+ * See:
+ * Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+ * for description of:
+ * /sys/bus/event_source/devices/<pmu>/events/<event>.scale
+ *
+ * Summary: Expose two sysfs files for each multiplier.
+ *
+ * 1. File <attr> contains a raw hardware value.
+ * 2. File <attr>.scale contains the multiplicative scale factor to be
+ * used by userspace to compute the actual value.
+ *
+ * So userspace knows that to get the frequency_factor it multiplies the
+ * provided value by the specified scale factor and vice-versa.
+ *
+ * That way there is no precision loss in the kernel interface and the
+ * API is future proof should the hardware register one day change to
+ * u16.u16 on some platform. (Or any other fixed point representation.)
+ *
+ * Example:
+ * File <attr> contains the value 2.5, represented as u8.8 0x0280, which
+ * is comprised of:
+ * - an integer part of 2
+ * - a fractional part of 0x80 (representing 0x80 / 2^8 == 0x80 / 256).
+ * File <attr>.scale contains a string representation of floating point
+ * value 0.00390625 (which is (1 / 256)).
+ * Userspace computes the actual value:
+ * 0x0280 * 0.00390625 -> 2.5
+ * or converts an actual value to the value to be written into <attr>:
+ * 2.5 / 0.00390625 -> 0x0280
+ */
+
+#define U8_8_VAL_MASK 0xffff
+#define U8_8_SCALE_TO_VALUE "0.00390625"
+
+static ssize_t freq_factor_scale_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
+}
+
+static u32 media_ratio_mode_to_factor(u32 mode)
+{
+ /* 0 -> 0, 1 -> 256, 2 -> 128 */
+ return !mode ? mode : 256 / mode;
+}
+
+static ssize_t media_freq_factor_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ intel_wakeref_t wakeref;
+ u32 mode;
+
+ /*
+ * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
+ * GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
+ */
+ if (IS_XEHPSDV(gt->i915) &&
+ slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
+ /*
+ * For XEHPSDV dynamic mode, GEN6_RPNSWREQ:13 does not contain
+ * the media_ratio_mode; just return the cached media ratio.
+ */
+ mode = slpc->media_ratio_mode;
+ } else {
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
+ mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
+ }
+
+ return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
+}
+
+static ssize_t media_freq_factor_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ u32 factor, mode;
+ int err;
+
+ err = kstrtou32(buff, 0, &factor);
+ if (err)
+ return err;
+
+ for (mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ mode <= SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; mode++)
+ if (factor == media_ratio_mode_to_factor(mode))
+ break;
+
+ if (mode > SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO)
+ return -EINVAL;
+
+ err = intel_guc_slpc_set_media_ratio_mode(slpc, mode);
+ if (!err) {
+ slpc->media_ratio_mode = mode;
+ DRM_DEBUG("Set slpc->media_ratio_mode to %d", mode);
+ }
+ return err ?: count;
+}
+
+static ssize_t media_RP0_freq_mhz_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_P0,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RP0 read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
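+
+/*
+ * Illustrative only: a fused P0 value of 16 read back from pcode would be
+ * reported as 16 * 50 = 800 MHz, given the 50 MHz units noted above.
+ */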
+
+static ssize_t media_RPn_freq_mhz_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ u32 val;
+ int err;
+
+ err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
+ PCODE_MBOX_FC_SC_READ_FUSED_PN,
+ PCODE_MBOX_DOMAIN_MEDIAFF, &val);
+
+ if (err)
+ return err;
+
+ /* Fused media RPn read from pcode is in units of 50 MHz */
+ val *= GT_FREQUENCY_MULTIPLIER;
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+INTEL_GT_ATTR_RW(media_freq_factor);
+static struct kobj_attribute attr_media_freq_factor_scale =
+ __ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
+INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
+INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
+
+static const struct attribute *media_perf_power_attrs[] = {
+ &attr_media_freq_factor.attr,
+ &attr_media_freq_factor_scale.attr,
+ &attr_media_RP0_freq_mhz.attr,
+ &attr_media_RPn_freq_mhz.attr,
+ NULL
+};
+
+static ssize_t
+default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
+}
+
+static struct kobj_attribute default_min_freq_mhz =
+__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
+
+static ssize_t
+default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
+}
+
+static struct kobj_attribute default_max_freq_mhz =
+__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+
+static const struct attribute * const rps_defaults_attrs[] = {
+ &default_min_freq_mhz.attr,
+ &default_max_freq_mhz.attr,
+ NULL
+};
+
+static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ const struct attribute * const *attrs;
+ struct attribute *vlv_attr;
+ int ret;
+
+ if (GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ if (is_object_gt(kobj)) {
+ attrs = gen6_rps_attrs;
+ vlv_attr = &attr_rps_vlv_rpe_freq_mhz.attr;
+ } else {
+ attrs = gen6_gt_attrs;
+ vlv_attr = &dev_attr_gt_vlv_rpe_freq_mhz.attr;
+ }
+
+ ret = sysfs_create_files(kobj, attrs);
+ if (ret)
+ return ret;
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ ret = sysfs_create_file(kobj, vlv_attr);
+
+ return ret;
+}
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ intel_sysfs_rc6_init(gt, kobj);
+
+ ret = intel_sysfs_rps_init(gt, kobj);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RPS sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+
+ /* end of the legacy interfaces */
+ if (!is_object_gt(kobj))
+ return;
+
+ ret = sysfs_create_file(kobj, &attr_punit_req_freq_mhz.attr);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
+ gt->info.id, ERR_PTR(ret));
+
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
+ if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_files(kobj, media_perf_power_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+
+ ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to add gt%u rps defaults (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
new file mode 100644
index 000000000..f567105a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_PM_H__
+#define __SYSFS_GT_PM_H__
+
+#include <linux/kobject.h>
+
+#include "intel_gt_types.h"
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj);
+
+#endif /* __SYSFS_GT_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
new file mode 100644
index 000000000..184ee9b11
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/seqlock.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "uc/intel_uc.h"
+#include "intel_gsc.h"
+
+#include "i915_vma.h"
+#include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
+#include "intel_hwconfig.h"
+#include "intel_llc_types.h"
+#include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
+#include "intel_migrate_types.h"
+#include "intel_wakeref.h"
+#include "pxp/intel_pxp_types.h"
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_engine_cs;
+struct intel_uncore;
+
+struct intel_mmio_range {
+ u32 start;
+ u32 end;
+};
+
+/*
+ * The hardware has multiple kinds of multicast register ranges that need
+ * special register steering (and future platforms are expected to add
+ * additional types).
+ *
+ * During driver startup, we initialize the steering control register to
+ * direct reads to a slice/subslice that are valid for the 'subslice' class
+ * of multicast registers. If another type of steering does not have any
+ * overlap in valid steering targets with 'subslice' style registers, we will
+ * need to explicitly re-steer reads of registers of the other type.
+ *
+ * Only the replication types that may need additional non-default steering
+ * are listed here.
+ */
+enum intel_steering_type {
+ L3BANK,
+ MSLICE,
+ LNCF,
+
+ /*
+ * On some platforms there are multiple types of MCR registers that
+ * will always return a non-terminated value at instance (0, 0). We'll
+ * lump those all into a single category to keep things simple.
+ */
+ INSTANCE0,
+
+ NUM_STEERING_TYPES
+};
+
+enum intel_submission_method {
+ INTEL_SUBMISSION_RING,
+ INTEL_SUBMISSION_ELSP,
+ INTEL_SUBMISSION_GUC,
+};
+
+struct gt_defaults {
+ u32 min_freq;
+ u32 max_freq;
+};
+
+enum intel_gt_type {
+ GT_PRIMARY,
+ GT_TILE,
+ GT_MEDIA,
+};
+
+struct intel_gt {
+ struct drm_i915_private *i915;
+ const char *name;
+ enum intel_gt_type type;
+
+ struct intel_uncore *uncore;
+ struct i915_ggtt *ggtt;
+
+ struct intel_uc uc;
+ struct intel_gsc gsc;
+
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+		 * After unbinding the PTE, we need to ensure the TLBs
+		 * are invalidated prior to releasing the physical pages.
+		 * But we only need one such invalidation for all unbinds,
+		 * so we track how many TLB invalidations have been
+		 * performed since unbinding the PTE and only emit an extra
+		 * invalidate if no full barrier has been passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
+
+ struct i915_wa_list wa_list;
+
+ struct intel_gt_timelines {
+ spinlock_t lock; /* protects active_list */
+ struct list_head active_list;
+ } timelines;
+
+ struct intel_gt_requests {
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+ } requests;
+
+ struct {
+ struct llist_head list;
+ struct work_struct work;
+ } watchdog;
+
+ struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
+
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
+ ktime_t last_init_time;
+ struct intel_reset reset;
+
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ intel_wakeref_t awake;
+
+ u32 clock_frequency;
+ u32 clock_period_ns;
+
+ struct intel_llc llc;
+ struct intel_rc6 rc6;
+ struct intel_rps rps;
+
+ spinlock_t *irq_lock;
+ u32 gt_imr;
+ u32 pm_ier;
+ u32 pm_imr;
+
+ u32 pm_guc_events;
+
+ struct {
+ bool active;
+
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqcount_mutex_t lock;
+
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
+ */
+ ktime_t total;
+
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+ } stats;
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
+ enum intel_submission_method submission_method;
+
+ /*
+ * Default address space (either GGTT or ppGTT depending on arch).
+ *
+ * Reserved for exclusive use by the kernel.
+ */
+ struct i915_address_space *vm;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ *
+ * Buffers older than 1s are periodically reaped from the pool,
+ * or may be reclaimed by the shrinker before then.
+ */
+ struct intel_gt_buffer_pool buffer_pool;
+
+ struct i915_vma *scratch;
+
+ struct intel_migrate migrate;
+
+ const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];
+
+ struct {
+ u8 groupid;
+ u8 instanceid;
+ } default_steering;
+
+ /*
+ * Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
+ */
+ phys_addr_t phys_addr;
+
+ struct intel_gt_info {
+ unsigned int id;
+
+ intel_engine_mask_t engine_mask;
+
+ u32 l3bank_mask;
+
+ u8 num_engines;
+
+ /* General presence of SFC units */
+ u8 sfc_mask;
+
+ /* Media engine access to SFC per instance */
+ u8 vdbox_sfc_access;
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+
+ unsigned long mslice_mask;
+
+ /** @hwconfig: hardware configuration data */
+ struct intel_hwconfig hwconfig;
+ } info;
+
+ struct {
+ u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
+ } mocs;
+
+ struct intel_pxp pxp;
+
+ /* gt/gtN sysfs */
+ struct kobject sysfs_gt;
+
+ /* sysfs defaults per gt */
+ struct gt_defaults defaults;
+ struct kobject *sysfs_defaults;
+};
+
+struct intel_gt_definition {
+ enum intel_gt_type type;
+ char *name;
+ u32 mapping_base;
+ u32 gsi_offset;
+ intel_engine_mask_t engine_mask;
+};
+
+enum intel_gt_scratch_field {
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+
+ /* 6 * 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
+
+ /* 4 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
+};
+
+#endif /* __INTEL_GT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
new file mode 100644
index 000000000..f4879f437
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
+
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_gtt.h"
+
+
+static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
+{
+ return IS_BROXTON(i915) && i915_vtd_active(i915);
+}
+
+bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
+}
+
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
+{
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * To avoid severe over-allocation when dealing with min_page_size
+ * restrictions, we override that behaviour here by allowing an object
+ * size and page layout which can be smaller. In practice this should be
+ * totally fine, since GTT paging structures are not typically inserted
+ * into the GTT.
+ *
+ * Note that we also hit this path for the scratch page, and for this
+ * case it might need to be 64K, but that should work fine here since we
+ * used the passed in size for the page size, which should ensure it
+ * also has the same alignment.
+ */
+ obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
+ vm->lmem_pt_obj_flags);
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
+ return obj;
+}
+
+struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
+{
+ struct drm_i915_gem_object *obj;
+
+ if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+ i915_gem_shrink_all(vm->i915);
+
+ obj = i915_gem_object_create_internal(vm->i915, sz);
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
+ return obj;
+}
+
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+ enum i915_map_type type;
+ void *vaddr;
+
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
+}
+
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+ enum i915_map_type type;
+ void *vaddr;
+
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
+}
+
+static void clear_vm_list(struct list_head *list)
+{
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!i915_gem_object_get_rcu(obj)) {
+ /*
+ * Object is dying, but has not yet cleared its
+ * vma list.
+ * Unbind the dying vma to ensure our list
+ * is completely drained. We leave the destruction to
+ * the object destructor to avoid the vma
+ * disappearing under it.
+ */
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+
+ /* Remove from the unbound list */
+ list_del_init(&vma->vm_link);
+
+ /*
+ * Delay the vm and vm mutex freeing until the
+ * object is done with destruction.
+ */
+ i915_vm_resv_get(vma->vm);
+ vma->vm_ddestroy = true;
+ } else {
+ i915_vma_destroy_locked(vma);
+ i915_gem_object_put(obj);
+ }
+
+ }
+}
+
+static void __i915_vm_close(struct i915_address_space *vm)
+{
+ mutex_lock(&vm->mutex);
+
+ clear_vm_list(&vm->bound_list);
+ clear_vm_list(&vm->unbound_list);
+
+ /* Check for must-fix unanticipated side-effects */
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
+
+ mutex_unlock(&vm->mutex);
+}
+
+/* lock the vm into the current ww, if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (vm->scratch[0]->base.resv == &vm->_resv) {
+ return i915_gem_object_lock(vm->scratch[0], ww);
+ } else {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ /* We borrowed the scratch page from ggtt, take the top level object */
+ return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+ }
+}
+
+void i915_address_space_fini(struct i915_address_space *vm)
+{
+ drm_mm_takedown(&vm->mm);
+}
+
+/**
+ * i915_vm_resv_release - Final struct i915_address_space destructor
+ * @kref: Pointer to the &i915_address_space.resv_ref member.
+ *
+ * This function is called when the last lock sharer no longer shares the
+ * &i915_address_space._resv lock, and also if we raced with the vma
+ * destruction path while destroying a vma.
+ */
+void i915_vm_resv_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, typeof(*vm), resv_ref);
+
+ dma_resv_fini(&vm->_resv);
+ mutex_destroy(&vm->mutex);
+
+ kfree(vm);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, release_work);
+
+ __i915_vm_close(vm);
+
+ /* Synchronize async unbinds. */
+ i915_vma_resource_bind_dep_sync_all(vm);
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ i915_vm_resv_put(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ queue_work(vm->i915->wq, &vm->release_work);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass)
+{
+ kref_init(&vm->ref);
+
+ /*
+ * Special case for GGTT that has already done an early
+ * kref_init here.
+ */
+ if (!kref_read(&vm->resv_ref))
+ kref_init(&vm->resv_ref);
+
+ vm->pending_unbind = RB_ROOT_CACHED;
+ INIT_WORK(&vm->release_work, __i915_vm_release);
+
+ /*
+ * The vm->mutex must be reclaim safe (for use in the shrinker).
+ * Do a dummy acquire now under fs_reclaim so that any allocation
+ * attempt holding the lock is immediately reported by lockdep.
+ */
+ mutex_init(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+
+ if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ } else {
+ /*
+		 * CHV + BXT VTD workarounds use stop_machine(),
+		 * which is allowed to allocate memory. This means &vm->mutex
+		 * is the outer lock, and in theory we can allocate memory inside
+		 * it through stop_machine().
+		 *
+		 * Add the annotation for this; we use trylock in the shrinker.
+ */
+ mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
+ might_alloc(GFP_KERNEL);
+ mutex_release(&vm->mutex.dep_map, _THIS_IP_);
+ }
+ dma_resv_init(&vm->_resv);
+
+ GEM_BUG_ON(!vm->total);
+ drm_mm_init(&vm->mm, 0, vm->total);
+
+ memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
+ ARRAY_SIZE(vm->min_alignment));
+
+ if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
+ subclass == VM_CLASS_PPGTT) {
+ vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
+ vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
+ } else if (HAS_64K_PAGES(vm->i915)) {
+ vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
+ vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
+ }
+
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+ INIT_LIST_HEAD(&vm->bound_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
+dma_addr_t __px_dma(struct drm_i915_gem_object *p)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return sg_dma_address(p->mm.pages->sgl);
+}
+
+struct page *__px_page(struct drm_i915_gem_object *p)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return sg_page(p->mm.pages->sgl);
+}
+
+void
+fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
+{
+ void *vaddr = __px_vaddr(p);
+
+ memset64(vaddr, val, count);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+}
+
+static void poison_scratch_page(struct drm_i915_gem_object *scratch)
+{
+ void *vaddr = __px_vaddr(scratch);
+ u8 val;
+
+ val = 0;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ val = POISON_FREE;
+
+ memset(vaddr, val, scratch->base.size);
+ drm_clflush_virt_range(vaddr, scratch->base.size);
+}
+
+int setup_scratch_page(struct i915_address_space *vm)
+{
+ unsigned long size;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch (read-only) between all vm, we create one 64k scratch page
+ * for all.
+ */
+ size = I915_GTT_PAGE_SIZE_4K;
+ if (i915_vm_is_4lvl(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
+ size = I915_GTT_PAGE_SIZE_64K;
+
+ do {
+ struct drm_i915_gem_object *obj;
+
+ obj = vm->alloc_scratch_dma(vm, size);
+ if (IS_ERR(obj))
+ goto skip;
+
+ if (map_pt_dma(vm, obj))
+ goto skip_obj;
+
+ /* We need a single contiguous page for our scratch */
+ if (obj->mm.page_sizes.sg < size)
+ goto skip_obj;
+
+ /* And it needs to be correspondingly aligned */
+ if (__px_dma(obj) & (size - 1))
+ goto skip_obj;
+
+ /*
+ * Use a non-zero scratch page for debugging.
+ *
+ * We want a value that should be reasonably obvious
+ * to spot in the error state, while also causing a GPU hang
+ * if executed. We prefer using a clear page in production, so
+ * should it ever be accidentally used, the effect should be
+ * fairly benign.
+ */
+ poison_scratch_page(obj);
+
+ vm->scratch[0] = obj;
+ vm->scratch_order = get_order(size);
+ return 0;
+
+skip_obj:
+ i915_gem_object_put(obj);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ /*
+ * If we need 64K minimum GTT pages for device local-memory,
+ * like on XEHPSDV, then we need to fail the allocation here,
+ * otherwise we can't safely support the insertion of
+ * local-memory pages for this vm, since the HW expects the
+ * correct physical alignment and size when the page-table is
+ * operating in 64K GTT mode, which includes any scratch PTEs,
+ * since userspace can still touch them.
+ */
+ if (HAS_64K_PAGES(vm->i915))
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ } while (1);
+}
+
+void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!vm->scratch[0])
+ return;
+
+ for (i = 0; i <= vm->top; i++)
+ i915_gem_object_put(vm->scratch[i]);
+}
+
+void gtt_write_workarounds(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ /*
+	 * This function is for GTT related workarounds. It is called on
+	 * driver load and after a GPU reset, so you can place workarounds
+	 * here even if they get overwritten by a GPU reset.
+ */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced from BDW+, although technically they
+	 * 64K pages were first introduced on BDW+, although technically they
+	 * only *work* from gen9+. For pre-BDW we instead have the option of
+	 * 32K pages, but we don't currently have any support for them in our
+	 * driver.
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ GRAPHICS_VER(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GRAPHICS_VER(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW we can see
+ * visual corruption when using 2M pages, and not disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
+}
+
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ /* TGL doesn't support LLC or AGE settings */
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void icl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ u64 pat;
+
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+ /* for scanout with eLLC */
+ if (GRAPHICS_VER(i915) >= 9)
+ pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
+ else
+ pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
+{
+ u64 pat;
+
+ /*
+ * Map WB on BDW to snooped on CHV.
+ *
+ * Only the snoop bit has meaning for CHV, the rest is
+ * ignored.
+ *
+ * The hardware will never snoop for certain types of accesses:
+ * - CPU GTT (GMADR->GGTT->no snoop->memory)
+ * - PPGTT page tables
+ * - some other special cycles
+ *
+ * As with BDW, we also need to consider the following for GT accesses:
+ * "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * Which means we must set the snoop bit in PAT entry 0
+ * in order to keep the global status page working.
+ */
+
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+void setup_private_pat(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
+
+ if (GRAPHICS_VER(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (GRAPHICS_VER(i915) >= 11)
+ icl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
+ else
+ bdw_setup_private_ppat(uncore);
+}
+
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ return vma;
+}
+
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = __vm_create_scratch_for_read(vm, size);
+ if (IS_ERR(vma))
+ return vma;
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
new file mode 100644
index 000000000..c0ca53cba
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Please try to maintain the following order within this file unless it makes
+ * sense to do otherwise. From top to bottom:
+ * 1. typedefs
+ * 2. #defines, and macros
+ * 3. structure definitions
+ * 4. function prototypes
+ *
+ * Within each section, please try to order by generation in ascending order,
+ * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
+ */
+
+#ifndef __INTEL_GTT_H__
+#define __INTEL_GTT_H__
+
+#include <linux/io-mapping.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/pagevec.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
+
+#include "gt/intel_reset.h"
+#include "i915_selftest.h"
+#include "i915_vma_resource.h"
+#include "i915_vma_types.h"
+#include "i915_params.h"
+#include "intel_memory_region.h"
+
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
+
+#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
+#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
+#define I915_PDES 512
+#define I915_PDE_MASK (I915_PDES - 1)
+
+/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_VALID REG_BIT(0)
+
+#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
+#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+#define GEN6_PDE_SHIFT 22
+#define GEN6_PDE_VALID REG_BIT(0)
+#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
+
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
+
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
+#define BYT_PTE_WRITEABLE REG_BIT(1)
+
+#define GEN12_PPGTT_PTE_LM BIT_ULL(11)
+
+#define GEN12_GGTT_PTE_LM BIT_ULL(1)
+
+#define GEN12_PDE_64K BIT(6)
+
+/*
+ * Cacheability Control is a 4-bit value. The low three bits are stored in bits
+ * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_PTE_UNCACHED (0)
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
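+
+/*
+ * Worked example of the split encoding: HSW_WB_ELLC_LLC_AGE0 uses control
+ * value 0xb, so bits 2:0 (0x3) land in PTE bits 3:1 giving 0x6, while bit 3
+ * (0x8) is shifted up to PTE bit 11 giving 0x800, i.e. 0x806 in total.
+ */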
+
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
+ * 31:30 | 29:21 | 20:12 | 11:0
+ * PDPE | PDE | PTE | offset
+ * The difference compared to a normal x86 3-level page table is that the
+ * PDPEs are programmed via register.
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
+ * PML4E | PDPE | PDE | PTE | offset
+ */
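+/*
+ * Worked example for the 48b layout (address picked arbitrarily): GPU
+ * virtual address 0x1234_5678_9abc splits into PML4E 0x24 (bits 47:39),
+ * PDPE 0xd1 (38:30), PDE 0xb3 (29:21), PTE 0x189 (20:12) and page offset
+ * 0xabc (11:0).
+ */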
+#define GEN8_3LVL_PDPES 4
+
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
+
+#define CHV_PPAT_SNOOP REG_BIT(6)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
+#define GEN8_PPAT_LLCeLLC (3<<2)
+#define GEN8_PPAT_LLCELLC (2<<2)
+#define GEN8_PPAT_LLC (1<<2)
+#define GEN8_PPAT_WB (3<<0)
+#define GEN8_PPAT_WT (2<<0)
+#define GEN8_PPAT_WC (1<<0)
+#define GEN8_PPAT_UC (0<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
+#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+
+#define GEN8_PAGE_PRESENT BIT_ULL(0)
+#define GEN8_PAGE_RW BIT_ULL(1)
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
+enum i915_cache_level;
+
+struct drm_i915_gem_object;
+struct i915_fence_reg;
+struct i915_vma;
+struct intel_gt;
+
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+
+struct i915_page_table {
+ struct drm_i915_gem_object *base;
+ union {
+ atomic_t used;
+ struct i915_page_table *stash;
+ };
+ bool is_compact;
+};
+
+struct i915_page_directory {
+ struct i915_page_table pt;
+ spinlock_t lock;
+ void **entry;
+};
+
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct drm_i915_gem_object *, __x, \
+ __px_choose_expr(px, struct i915_page_table *, __x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
+ (void)0)))
+
+struct page *__px_page(struct drm_i915_gem_object *p);
+dma_addr_t __px_dma(struct drm_i915_gem_object *p);
+#define px_dma(px) (__px_dma(px_base(px)))
+
+void *__px_vaddr(struct drm_i915_gem_object *p);
+#define px_vaddr(px) (__px_vaddr(px_base(px)))
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
+
+struct i915_vm_pt_stash {
+ /* preallocated chains of page tables/directories */
+ struct i915_page_table *pt[2];
+ /*
+ * Optionally override the alignment/size of the physical page that
+	 * contains each PT. If not set, this defaults back to the usual
+ * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
+ * structures. MUST be a power-of-two. ONLY applicable on discrete
+ * platforms.
+ */
+ int pt_sz;
+};
+
+struct i915_vma_ops {
+ /* Map an object into an address space with the given cache flags. */
+ void (*bind_vma)(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ /*
+ * Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page.
+ */
+ void (*unbind_vma)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res);
+
+};
+
+struct i915_address_space {
+ struct kref ref;
+ struct work_struct release_work;
+
+ struct drm_mm mm;
+ struct intel_gt *gt;
+ struct drm_i915_private *i915;
+ struct device *dma;
+ u64 total; /* size addr space maps (ex. 2GB for ggtt) */
+ u64 reserved; /* size addr space reserved */
+ u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];
+
+ unsigned int bind_async_flags;
+
+ struct mutex mutex; /* protects vma and our lists */
+
+ struct kref resv_ref; /* kref to keep the reservation lock alive. */
+ struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
+#define VM_CLASS_DPT 2
+
+ struct drm_i915_gem_object *scratch[4];
+ /**
+ * List of vma currently bound.
+ */
+ struct list_head bound_list;
+
+ /**
+ * List of vmas not yet bound or evicted.
+ */
+ struct list_head unbound_list;
+
+ /* Global GTT */
+ bool is_ggtt:1;
+
+ /* Display page table */
+ bool is_dpt:1;
+
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
+
+ /* Skip pte rewrite on unbind for suspend. Protected by @mutex */
+ bool skip_pte_rewrite:1;
+
+ u8 top;
+ u8 pd_shift;
+ u8 scratch_order;
+
+ /* Flags used when creating page-table objects for this vm */
+ unsigned long lmem_pt_obj_flags;
+
+ /* Interval tree for pending unbind vma resources */
+ struct rb_root_cached pending_unbind;
+
+ struct drm_i915_gem_object *
+ (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
+ struct drm_i915_gem_object *
+ (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
+
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
+#define PTE_READ_ONLY BIT(0)
+#define PTE_LM BIT(1)
+
+ void (*allocate_va_range)(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length);
+ void (*clear_range)(struct i915_address_space *vm,
+ u64 start, u64 length);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*raw_insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*raw_insert_entries)(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+ void (*cleanup)(struct i915_address_space *vm);
+
+ void (*foreach)(struct i915_address_space *vm,
+ u64 start, u64 length,
+ void (*fn)(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data),
+ void *data);
+
+ struct i915_vma_ops vma_ops;
+
+ I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+ I915_SELFTEST_DECLARE(bool scrub_64K);
+};
+
+/*
+ * The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_ggtt {
+ struct i915_address_space vm;
+
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_ppgtt *alias;
+
+ bool do_idle_maps;
+
+ /**
+ * @pte_lost: Are ptes lost on resume?
+ *
+ * Whether the system was recently restored from hibernate and
+ * thus may have lost pte content.
+ */
+ bool pte_lost;
+
+ /**
+ * @probed_pte: Probed pte value on suspend. Re-checked on resume.
+ */
+ u64 probed_pte;
+
+ int mtrr;
+
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
+ u32 pin_bias;
+
+ unsigned int num_fences;
+ struct i915_fence_reg *fence_regs;
+ struct list_head fence_list;
+
+ /**
+ * List of all objects in gtt_space, currently mmaped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ struct mutex error_mutex;
+ struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
+};
+
+struct i915_ppgtt {
+ struct i915_address_space vm;
+
+ struct i915_page_directory *pd;
+};
+
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
+
+bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);
+
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
+static inline bool
+i915_vm_is_4lvl(const struct i915_address_space *vm)
+{
+ return (vm->total - 1) >> 32;
+}
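+
+/*
+ * For example, a 4 GiB address space (total == 1ULL << 32) evaluates to
+ * false here (3-level), while a 48b ppGTT with total == 1ULL << 48
+ * evaluates to true (4-level).
+ */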
+
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
+static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
+ enum intel_memory_type type)
+{
+ /* avoid INTEL_MEMORY_MOCK overflow */
+ if ((int)type >= ARRAY_SIZE(vm->min_alignment))
+ type = INTEL_MEMORY_SYSTEM;
+
+ return vm->min_alignment[type];
+}
+
+static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
+ struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+ enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;
+
+ return i915_vm_min_alignment(vm, type);
+}
+
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, vm);
+}
+
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->ref);
+ return vm;
+}
+
+static inline struct i915_address_space *
+i915_vm_tryget(struct i915_address_space *vm)
+{
+ return kref_get_unless_zero(&vm->ref) ? vm : NULL;
+}
+
+static inline void assert_vm_alive(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!kref_read(&vm->ref));
+}
+
+/**
+ * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
+ * @vm: The vm whose reservation lock we want to share.
+ *
+ * Return: A pointer to the vm's reservation lock.
+ */
+static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
+{
+ kref_get(&vm->resv_ref);
+ return &vm->_resv;
+}
+
+void i915_vm_release(struct kref *kref);
+
+void i915_vm_resv_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->ref, i915_vm_release);
+}
+
+/**
+ * i915_vm_resv_put - Release a reference on the vm's reservation lock
+ * @vm: The vm whose reservation lock reference, obtained from
+ *	i915_vm_resv_get(), should be released.
+ */
+static inline void i915_vm_resv_put(struct i915_address_space *vm)
+{
+ kref_put(&vm->resv_ref, i915_vm_resv_release);
+}
+
+void i915_address_space_init(struct i915_address_space *vm, int subclass);
+void i915_address_space_fini(struct i915_address_space *vm);
+
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
+{
+ const u32 mask = NUM_PTE(pde_shift) - 1;
+
+ return (address >> PAGE_SHIFT) & mask;
+}
+
+/*
+ * Helper to count the number of PTEs within the given length. This count
+ * does not cross a page table boundary, so the max value would be
+ * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
+ */
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
+{
+ const u64 mask = ~((1ULL << pde_shift) - 1);
+ u64 end;
+
+ GEM_BUG_ON(length == 0);
+ GEM_BUG_ON(offset_in_page(addr | length));
+
+ end = addr + length;
+
+ if ((addr & mask) != (end & mask))
+ return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
+
+ return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
+}
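+
+/*
+ * For instance, with the gen6 PDE shift of 22 each page table covers 4M
+ * (1024 PTEs): addr = 0x3ff000 with length = 0x2000 crosses that boundary,
+ * so this returns 1024 - 1023 = 1 and the caller has to iterate again for
+ * the remaining page in the next page table.
+ */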
+
+static inline u32 i915_pde_index(u64 addr, u32 shift)
+{
+ return (addr >> shift) & I915_PDE_MASK;
+}
+
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
+{
+ struct i915_page_table *pt = ppgtt->pd->entry[n];
+
+ return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res);
+
+int i915_ggtt_probe_hw(struct drm_i915_private *i915);
+int i915_ggtt_init_hw(struct drm_i915_private *i915);
+int i915_ggtt_enable_hw(struct drm_i915_private *i915);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *i915);
+void i915_ggtt_driver_release(struct drm_i915_private *i915);
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
+
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt);
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
+
+void i915_ggtt_suspend_vm(struct i915_address_space *vm);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm);
+void i915_ggtt_suspend(struct i915_ggtt *gtt);
+void i915_ggtt_resume(struct i915_ggtt *ggtt);
+
+/**
+ * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
+ * @i915: The device private.
+ * @val: Whether the ptes should be marked as lost.
+ *
+ * In some cases pte content is retained across suspend, but it is typically
+ * lost across hibernate. The ptes should therefore be marked as lost on
+ * restore from hibernation and the marking cleared on suspend.
+ */
+void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);
+
+void
+fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
+
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
+
+int setup_scratch_page(struct i915_address_space *vm);
+void free_scratch(struct i915_address_space *vm);
+
+struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
+struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
+struct i915_page_directory *__alloc_pd(int npde);
+
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+
+void free_px(struct i915_address_space *vm,
+ struct i915_page_table *pt, int lvl);
+#define free_pt(vm, px) free_px(vm, px, 0)
+#define free_pd(vm, px) free_px(vm, px_pt(px), 1)
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table *pt,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
+
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct drm_i915_gem_object * const scratch);
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct drm_i915_gem_object * const scratch);
+void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+
+void ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res);
+
+void gtt_write_workarounds(struct intel_gt *gt);
+
+void setup_private_pat(struct intel_uncore *uncore);
+
+int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 size);
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash);
+void i915_vm_free_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash);
+
+struct i915_vma *
+__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
+
+static inline struct sgt_dma {
+ struct scatterlist *sg;
+ dma_addr_t dma, max;
+} sgt_dma(struct i915_vma_resource *vma_res) {
+ struct scatterlist *sg = vma_res->bi.pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+
+ return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_hwconfig.h b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
new file mode 100644
index 000000000..322290780
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_HWCONFIG_H_
+#define _INTEL_HWCONFIG_H_
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+struct intel_hwconfig {
+ u32 size;
+ void *ptr;
+};
+
+int intel_gt_init_hwconfig(struct intel_gt *gt);
+void intel_gt_fini_hwconfig(struct intel_gt *gt);
+
+#endif /* _INTEL_HWCONFIG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000..1d19c073b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <asm/tsc.h>
+#include <linux/cpufreq.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+#include "intel_rps.h"
+
+struct ia_constants {
+ unsigned int min_gpu_freq;
+ unsigned int max_gpu_freq;
+
+ unsigned int min_ring_freq;
+ unsigned int max_ia_freq;
+};
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+ return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int max_khz;
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_khz = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to measured freq if none found, PCU will ensure we
+ * don't go over
+ */
+ max_khz = tsc_khz;
+ }
+
+ return max_khz / 1000;
+}
+
+static bool get_ia_constants(struct intel_llc *llc,
+ struct ia_constants *consts)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ if (!HAS_LLC(i915) || IS_DGFX(i915))
+ return false;
+
+ consts->max_ia_freq = cpu_max_MHz();
+
+ consts->min_ring_freq =
+ intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
+
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
+
+ return true;
+}
+
+static void calc_ia_freq(struct intel_llc *llc,
+ unsigned int gpu_freq,
+ const struct ia_constants *consts,
+ unsigned int *out_ia_freq,
+ unsigned int *out_ring_freq)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ const int diff = consts->max_gpu_freq - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (GRAPHICS_VER(i915) >= 9) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(consts->min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(i915)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(consts->min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ const int min_freq = 15;
+ const int scale = 180;
+
+ /*
+ * On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = consts->max_ia_freq - diff * scale / 2;
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
+
+ *out_ia_freq = ia_freq;
+ *out_ring_freq = ring_freq;
+}
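+
+/*
+ * Rough example of the legacy branch above, assuming a hypothetical
+ * max_ia_freq of 3000 MHz: for a gpu_freq at least min_freq (15, i.e. the
+ * 750MHz floor) and 10 steps below max_gpu_freq, ia_freq becomes
+ * 3000 - 10 * 180 / 2 = 2100, stored as DIV_ROUND_CLOSEST(2100, 100) = 21,
+ * i.e. in 100 MHz units.
+ */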
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+ struct ia_constants consts;
+ unsigned int gpu_freq;
+
+ if (!get_ia_constants(llc, &consts))
+ return;
+
+ /*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't get accidentally into infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = consts.max_gpu_freq;
+ gpu_freq >= consts.min_gpu_freq;
+ gpu_freq--) {
+ unsigned int ia_freq, ring_freq;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+ snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+ gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+ /* Currently there is no HW configuration to be done to disable. */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_llc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000..0e2e3871c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000..ca5bdf166
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
new file mode 100644
index 000000000..7eb01ff17
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -0,0 +1,1848 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gem/i915_gem_lmem.h"
+
+#include "gen8_engine_cs.h"
+#include "i915_drv.h"
+#include "i915_perf.h"
+#include "i915_reg.h"
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "shmem_utils.h"
+
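+/*
+ * The per-engine register tables below use a compact byte encoding that
+ * set_offsets() expands into MI_LOAD_REGISTER_IMM packets: NOP(x) advances
+ * the write cursor by x dwords, LRI(count, flags) opens a new LRI block of
+ * count registers, and REG()/REG16() encode each register offset (relative
+ * to the engine's mmio base) as a variable-length sequence of 7-bit bytes
+ * with the top bit used as a continuation flag. END terminates the table.
+ */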
+static void set_offsets(u32 *regs,
+ const u8 *data,
+ const struct intel_engine_cs *engine,
+ bool close)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END 0
+{
+ const u32 base = engine->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ count = *data++ & ~BIT(7);
+ regs += count;
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ if (GRAPHICS_VER(engine->i915) >= 11)
+ *regs |= MI_LRI_LRM_CS_MMIO;
+ regs++;
+
+ GEM_BUG_ON(!count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ regs[0] = base + (offset << 2);
+ regs += 2;
+ } while (--count);
+ }
+
+ if (close) {
+ /* Close the batch; used mainly by live_lrc_layout() */
+ *regs = MI_BATCH_BUFFER_END;
+ if (GRAPHICS_VER(engine->i915) >= 11)
+ *regs |= BIT(0);
+ }
+}
+
+static const u8 gen8_xcs_offsets[] = {
+ NOP(1),
+ LRI(11, 0),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+
+ NOP(9),
+ LRI(9, 0),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(2, 0),
+ REG16(0x200),
+ REG(0x028),
+
+ END
+};
+
+static const u8 gen9_xcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, POSTED),
+ REG16(0x200),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+
+ END
+};
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
+static const u8 dg2_xcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
+static const u8 gen8_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+static const u8 gen9_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x34),
+ REG(0x30),
+ REG(0x38),
+ REG(0x3c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0xc8),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x28),
+ REG(0x9c),
+ REG(0xc0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x68),
+
+ END
+};
+
+static const u8 gen11_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(1, POSTED),
+ REG(0x1b0),
+
+ NOP(10),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+ NOP(3 + 9 + 1),
+
+ LRI(51, POSTED),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+ REG(0x084),
+ NOP(1),
+
+ END
+};
+
+static const u8 xehp_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+static const u8 dg2_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
+{
+ /*
+ * The gen12+ lists only have the registers we program in the basic
+ * default state. We rely on the context image using relative
+	 * addressing to automatically fix up the register state between the
+	 * physical engines backing a virtual engine.
+ */
+ GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
+ !intel_engine_has_relative_mmio(engine));
+
+ if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_rcs_offsets;
+ else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return xehp_rcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return gen12_rcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 11)
+ return gen11_rcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return gen9_rcs_offsets;
+ else
+ return gen8_rcs_offsets;
+ } else {
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_xcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return gen12_xcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return gen9_xcs_offsets;
+ else
+ return gen8_xcs_offsets;
+ }
+}
+
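+/*
+ * The lrc_ring_*() helpers below return the dword index of a given register
+ * slot within the context register state (lrc_reg_state), or -1 when the
+ * platform/engine does not have that slot; callers must check for -1 before
+ * indexing.
+ */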
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x70;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x60;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x80;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x70;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x64;
+ else if (GRAPHICS_VER(engine->i915) >= 8 &&
+ engine->class == RENDER_CLASS)
+ return 0xc4;
+ else
+ return -1;
+}
+
+static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x84;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x74;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x68;
+ else if (engine->class == RENDER_CLASS)
+ return 0xd8;
+ else
+ return -1;
+}
+
+static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x12;
+ else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
+ return 0x18;
+ else
+ return -1;
+}
+
+static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_wa_bb_per_ctx(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_indirect_ptr(engine);
+ if (x < 0)
+ return x;
+
+ return x + 2;
+}
+
+static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
+{
+
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ /*
+ * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
+ * simply to match the RCS context image layout.
+ */
+ return 0xc6;
+ else if (engine->class != RENDER_CLASS)
+ return -1;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0xb6;
+ else if (GRAPHICS_VER(engine->i915) >= 11)
+ return 0xaa;
+ else
+ return -1;
+}
+
+static u32
+lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
+{
+ switch (GRAPHICS_VER(engine->i915)) {
+ default:
+ MISSING_CASE(GRAPHICS_VER(engine->i915));
+ fallthrough;
+ case 12:
+ return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 11:
+ return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 9:
+ return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ case 8:
+ return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ }
+}
+
+static void
+lrc_setup_indirect_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr,
+ u32 size)
+{
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
+ GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
+ regs[lrc_ring_indirect_ptr(engine) + 1] =
+ ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
+
+ GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
+ regs[lrc_ring_indirect_offset(engine) + 1] =
+ lrc_ring_indirect_offset_default(engine) << 6;
+}
+
+static void init_common_regs(u32 * const regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
+{
+ u32 ctl;
+ int loc;
+
+ ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (inhibit)
+ ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ if (GRAPHICS_VER(engine->i915) < 11)
+ ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ regs[CTX_CONTEXT_CONTROL] = ctl;
+
+ regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
+
+ loc = lrc_ring_bb_offset(engine);
+ if (loc != -1)
+ regs[loc + 1] = 0;
+}
+
+static void init_wa_bb_regs(u32 * const regs,
+ const struct intel_engine_cs *engine)
+{
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
+
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
+ }
+
+ if (wa_ctx->indirect_ctx.size) {
+ lrc_setup_indirect_ctx(regs, engine,
+ i915_ggtt_offset(wa_ctx->vma) +
+ wa_ctx->indirect_ctx.offset,
+ wa_ctx->indirect_ctx.size);
+ }
+}
+
+static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
+{
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
+ */
+ ASSIGN_CTX_PML4(ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
+ }
+}
+
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
+}
+
+static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
+{
+ int x;
+
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ }
+}
+
+static void __lrc_init_regs(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
+{
+ /*
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+	 * on a subsequent save, the GPU will recreate this batch buffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
+ */
+
+ if (inhibit)
+ memset(regs, 0, PAGE_SIZE);
+
+ set_offsets(regs, reg_offsets(engine), engine, inhibit);
+
+ init_common_regs(regs, ce, engine, inhibit);
+ init_ppgtt_regs(regs, vm_alias(ce->vm));
+
+ init_wa_bb_regs(regs, engine);
+
+ __reset_stop_ring(regs, engine);
+}
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool inhibit)
+{
+ __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
+}
+
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ __reset_stop_ring(ce->lrc_reg_state, engine);
+}
+
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += engine->context_size;
+
+ memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
+}
+
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += engine->context_size;
+
+ if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
+ drm_err_once(&engine->i915->drm,
+ "%s context redzone overwritten!\n",
+ engine->name);
+}
+
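+/*
+ * On platforms that need it, __lrc_alloc_state() appends an extra page to the
+ * context object for a per-context indirect workaround batch buffer;
+ * ce->wa_bb_page records that page's index within the object.
+ */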
+static u32 context_wa_bb_offset(const struct intel_context *ce)
+{
+ return PAGE_SIZE * ce->wa_bb_page;
+}
+
+static u32 *context_indirect_bb(const struct intel_context *ce)
+{
+ void *ptr;
+
+ GEM_BUG_ON(!ce->wa_bb_page);
+
+ ptr = ce->lrc_reg_state;
+ ptr -= LRC_STATE_OFFSET; /* back to start of context image */
+ ptr += context_wa_bb_offset(ce);
+
+ return ptr;
+}
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state)
+{
+ bool inhibit = true;
+
+ set_redzone(state, engine);
+
+ if (engine->default_state) {
+ shmem_read(engine->default_state, 0,
+ state, engine->context_size);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ inhibit = false;
+ }
+
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(state, 0, PAGE_SIZE);
+
+ /* Clear the indirect wa and storage */
+ if (ce->wa_bb_page)
+ memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);
+
+ /*
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
+ */
+ __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
+}
+
+u32 lrc_indirect_bb(const struct intel_context *ce)
+{
+ return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);
+}
+
+static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
+{
+ /* If predication is active, this will be noop'ed */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
+ *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+ *cs++ = 0; /* No predication */
+
+ /* predicated end, only terminates if SET_PREDICATE_RESULT:0 is clear */
+ *cs++ = MI_BATCH_BUFFER_END | BIT(15);
+ *cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;
+
+ /* Instructions are no longer predicated (disabled), we can proceed */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
+ *cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
+ *cs++ = 0;
+ *cs++ = 1; /* enable predication before the next BB */
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA);
+
+ return cs;
+}
+
+static struct i915_vma *
+__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 context_size;
+
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
+
+ if (GRAPHICS_VER(engine->i915) == 12) {
+ ce->wa_bb_page = context_size / PAGE_SIZE;
+ context_size += PAGE_SIZE;
+ }
+
+ if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
+ ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
+ context_size += PARENT_SCRATCH_SIZE;
+ }
+
+ obj = i915_gem_object_create_lmem(engine->i915, context_size,
+ I915_BO_ALLOC_PM_VOLATILE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ return vma;
+}
+
+static struct intel_timeline *
+pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+
+ return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
+}
+
+int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
+{
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+ int err;
+
+ GEM_BUG_ON(ce->state);
+
+ vma = __lrc_alloc_state(ce, engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ ring = intel_engine_create_ring(engine, ce->ring_size);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_vma;
+ }
+
+ if (!page_mask_bits(ce->timeline)) {
+ struct intel_timeline *tl;
+
+ /*
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
+ */
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce, engine);
+ else
+ tl = intel_timeline_create(engine->gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_ring;
+ }
+
+ ce->timeline = tl;
+ }
+
+ ce->ring = ring;
+ ce->state = vma;
+
+ return 0;
+
+err_ring:
+ intel_ring_put(ring);
+err_vma:
+ i915_vma_put(vma);
+ return err;
+}
+
+void lrc_reset(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
+ intel_ring_reset(ce->ring, ce->ring->emit);
+
+ /* Scrub away the garbage */
+ lrc_init_regs(ce, ce->engine, true);
+ ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
+}
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ GEM_BUG_ON(!ce->state);
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915,
+ ce->state->obj,
+ false) |
+ I915_MAP_OVERRIDE);
+
+ return PTR_ERR_OR_ZERO(*vaddr);
+}
+
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
+{
+ ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
+
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
+ lrc_init_state(ce, engine, vaddr);
+
+ ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
+ return 0;
+}
+
+void lrc_unpin(struct intel_context *ce)
+{
+ if (unlikely(ce->parallel.last_rq)) {
+ i915_request_put(ce->parallel.last_rq);
+ ce->parallel.last_rq = NULL;
+ }
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
+ ce->engine);
+}
+
+void lrc_post_unpin(struct intel_context *ce)
+{
+ i915_gem_object_unpin_map(ce->state->obj);
+}
+
+void lrc_fini(struct intel_context *ce)
+{
+ if (!ce->state)
+ return;
+
+ intel_ring_put(fetch_and_zero(&ce->ring));
+ i915_vma_put(fetch_and_zero(&ce->state));
+}
+
+void lrc_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ GEM_BUG_ON(!i915_active_is_idle(&ce->active));
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ lrc_fini(ce);
+
+ intel_context_fini(ce);
+ intel_context_free(ce);
+}
+
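+/*
+ * The gen12 indirect-context workaround batch below loads CS_GPR(0, 0) from
+ * the CTX_TIMESTAMP dword saved in the context image and then copies that
+ * GPR back into RING_CTX_TIMESTAMP (the MI_LOAD_REGISTER_REG is issued
+ * twice).
+ */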
+static u32 *
+gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ CTX_TIMESTAMP * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+{
+ GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
+ (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_REG |
+ MI_LRR_SOURCE_CS_MMIO |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
+ *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
+
+ return cs;
+}
+
+/*
+ * On DG2, an RCS restore hang has been detected during context restore of a
+ * preempted context in GPGPU mode. This is extremely timing dependent.
+ * To address it, the software workaround batch buffer (wa bb) below is
+ * implemented for DG2 A steppings.
+ */
+static u32 *
+dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG);
+ *cs++ = 0x21;
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT1);
+
+ *cs++ = MI_LOAD_REGISTER_REG;
+ *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+ *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT2);
+
+ return cs;
+}
+
+/*
+ * The bspec's tuning guide asks us to program a vertical watermark value of
+ * 0x3FF. However this register is not saved/restored properly by the
+ * hardware, so we're required to apply the desired value via INDIRECT_CTX
+ * batch buffer to ensure the value takes effect properly. All other bits
+ * in this register should remain at 0 (the hardware default).
+ */
+static u32 *
+dg2_emit_draw_watermark_setting(u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
+ *cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_cmd_buf_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ /* Wa_22011450934:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(ce->engine->i915, G11, STEP_A0, STEP_B0))
+ cs = dg2_emit_rcs_hang_wabb(ce, cs);
+
+ /* Wa_16013000631:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(ce->engine->i915))
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+
+ cs = gen12_emit_aux_table_inv(ce->engine, cs);
+
+ /* Wa_16014892111 */
+ if (IS_DG2(ce->engine->i915))
+ cs = dg2_emit_draw_watermark_setting(cs);
+
+ return cs;
+}
+
+static u32 *
+gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+{
+ cs = gen12_emit_timestamp_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ /* Wa_16013000631:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(ce->engine->i915))
+ if (ce->engine->class == COMPUTE_CLASS)
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
+ 0);
+
+ return gen12_emit_aux_table_inv(ce->engine, cs);
+}
+
+static void
+setup_indirect_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ u32 * const start = context_indirect_bb(ce);
+ u32 *cs;
+
+ cs = emit(ce, start);
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ while ((unsigned long)cs % CACHELINE_BYTES)
+ *cs++ = MI_NOOP;
+
+ GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start));
+ setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start));
+
+ lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
+ lrc_indirect_bb(ce),
+ (cs - start) * sizeof(*cs));
+}
+
+/*
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ *      bits 48-53:    engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * On Xe_HP, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-37: virtual function number
+ * bit 38: mbz, reserved for use by hardware
+ * bits 39-54: SW context ID
+ * bits 55-57: reserved
+ * bits 58-63: SW counter
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
+ */
+static u32 lrc_descriptor(const struct intel_context *ce)
+{
+ u32 desc;
+
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (GRAPHICS_VER(ce->vm->i915) == 8)
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ return i915_ggtt_offset(ce->state) | desc;
+}
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head)
+{
+ struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD] = head;
+ regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+
+ /* RPCS */
+ if (engine->class == RENDER_CLASS) {
+ regs[CTX_R_PWR_CLK_STATE] =
+ intel_sseu_make_rpcs(engine->gt, &ce->sseu);
+
+ i915_oa_init_reg_state(ce, engine);
+ }
+
+ if (ce->wa_bb_page) {
+ u32 *(*fn)(const struct intel_context *ce, u32 *cs);
+
+ fn = gen12_emit_indirect_ctx_xcs;
+ if (ce->engine->class == RENDER_CLASS)
+ fn = gen12_emit_indirect_ctx_rcs;
+
+		/* Mutually exclusive with the global indirect bb */
+ GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
+ setup_indirect_ctx_bb(ce, engine, fn);
+ }
+
+ return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
+}
+
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
+}
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when)
+{
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
+
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
+ }
+
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
+ }
+
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
+
+ WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
+}
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: the WA is applied in a WA
+ * batch where the values are only initialized once, so we cannot read the
+ * register at the beginning and reuse the value later. Hence we save its
+ * value to memory, upload a constant with bit21 set and then restore the
+ * saved value. To simplify the WA, the constant is formed from the default
+ * value of this register. This shouldn't be a problem because we only modify
+ * it for a short period and this batch is non-preemptible. We could of
+ * course use additional instructions that read the actual value of the
+ * register at that time and set our bit of interest, but that makes the WA
+ * more complicated.
+ *
+ * This WA is also required for Gen9, so extracting it as a function avoids
+ * code duplication.
+ */
+static u32 *
+gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
+{
+ /* NB no one else is allowed to scribble over scratch + 256! */
+ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
+ *batch++ = 0;
+
+ *batch++ = MI_LOAD_REGISTER_IMM(1);
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
+
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
+
+ *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
+ *batch++ = 0;
+
+ return batch;
+}
+
+/*
+ * Typically we only have one indirect_ctx and one per_ctx batch buffer,
+ * which are initialized at the beginning and shared across all contexts,
+ * but this field lets us have multiple batches at different offsets and
+ * select between them based on some criteria. At the moment this batch
+ * always starts at the beginning of the page and we don't have multiple
+ * wa_ctx batch buffers.
+ *
+ * The number of WAs applied is not known at the beginning; we use this
+ * field to return the number of DWORDs written.
+ *
+ * Note that this batch does not contain MI_BATCH_BUFFER_END, so it adds
+ * NOOPs as padding to make it cacheline aligned. MI_BATCH_BUFFER_END will
+ * be added to the per-ctx batch, and together they make a complete batch
+ * buffer.
+ */
+static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ /* WaDisableCtxRestoreArbitration:bdw,chv */
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+ if (IS_BROADWELL(engine->i915))
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+
+ /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+ /* Actual scratch location is at 128 bytes offset */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ /* Pad to end of cacheline */
+ while ((unsigned long)batch % CACHELINE_BYTES)
+ *batch++ = MI_NOOP;
+
+ /*
+ * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+ * execution depends on the length specified in terms of cache lines
+ * in the register CTX_RCS_INDIRECT_CTX
+ */
+
+ return batch;
+}
+
+struct lri {
+ i915_reg_t reg;
+ u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
+{
+ GEM_BUG_ON(!count || count > 63);
+
+ *batch++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *batch++ = i915_mmio_reg_offset(lri->reg);
+ *batch++ = lri->value;
+ } while (lri++, --count);
+ *batch++ = MI_NOOP;
+
+ return batch;
+}
+
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ static const struct lri lri[] = {
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+ {
+ COMMON_SLICE_CHICKEN2,
+ __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+ 0),
+ },
+
+ /* BSpec: 11391 */
+ {
+ FF_SLICE_CHICKEN,
+ __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+ FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ },
+
+ /* BSpec: 11299 */
+ {
+ _3D_CHICKEN3,
+ __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+ _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ }
+ };
+
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
+ batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
+
+ /* WaMediaPoolStateCmdInWABB:bxt,glk */
+ if (HAS_POOLED_EU(engine->i915)) {
+ /*
+		 * EU pool configuration is set up along with the golden context
+		 * during context initialization. The value depends on the
+		 * device type (2x6 or 3x6) and needs to be updated based on
+		 * which subslice is disabled, especially for 2x6 devices.
+		 * However, it is safe to load the default 3x6 configuration
+		 * instead of masking off the corresponding bits, because the HW
+		 * ignores bits of a disabled subslice and drops down to the
+		 * appropriate config. Please see render_state_setup() in
+		 * i915_gem_render_state.c for the possible configurations; to
+		 * avoid duplication they are not shown here again.
+ */
+ *batch++ = GEN9_MEDIA_POOL_STATE;
+ *batch++ = GEN9_MEDIA_POOL_ENABLE;
+ *batch++ = 0x00777000;
+ *batch++ = 0;
+ *batch++ = 0;
+ *batch++ = 0;
+ }
+
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ /* Pad to end of cacheline */
+ while ((unsigned long)batch % CACHELINE_BYTES)
+ *batch++ = MI_NOOP;
+
+ return batch;
+}
+
+#define CTX_WA_BB_SIZE (PAGE_SIZE)
+
+static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
+{
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+}
+
+typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
+
+void lrc_init_wa_ctx(struct intel_engine_cs *engine)
+{
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+ struct i915_wa_ctx_bb *wa_bb[] = {
+ &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
+ };
+ wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
+ struct i915_gem_ww_ctx ww;
+ void *batch, *batch_ptr;
+ unsigned int i;
+ int err;
+
+ if (!(engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
+ return;
+
+ switch (GRAPHICS_VER(engine->i915)) {
+ case 12:
+ case 11:
+ return;
+ case 9:
+ wa_bb_fn[0] = gen9_init_indirectctx_bb;
+ wa_bb_fn[1] = NULL;
+ break;
+ case 8:
+ wa_bb_fn[0] = gen8_init_indirectctx_bb;
+ wa_bb_fn[1] = NULL;
+ break;
+ default:
+ MISSING_CASE(GRAPHICS_VER(engine->i915));
+ return;
+ }
+
+ err = lrc_create_wa_ctx(engine);
+ if (err) {
+ /*
+		 * We continue even if we fail to initialize the WA batch,
+		 * because we only expect rare glitches and nothing critical
+		 * enough to prevent us from using the GPU.
+ */
+ drm_err(&engine->i915->drm,
+ "Ignoring context switch w/a allocation error:%d\n",
+ err);
+ return;
+ }
+
+ if (!engine->wa_ctx.vma)
+ return;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);
+ if (!err)
+ err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);
+ if (err)
+ goto err;
+
+ batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ /*
+ * Emit the two workaround batch buffers, recording the offset from the
+ * start of the workaround batch buffer object for each and their
+ * respective sizes.
+ */
+ batch_ptr = batch;
+ for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
+ wa_bb[i]->offset = batch_ptr - batch;
+ if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+ CACHELINE_BYTES))) {
+ err = -EINVAL;
+ break;
+ }
+ if (wa_bb_fn[i])
+ batch_ptr = wa_bb_fn[i](engine, batch_ptr);
+ wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
+ }
+ GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
+
+ __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+ __i915_gem_object_release_map(wa_ctx->vma->obj);
+
+ /* Verify that we can handle failure to setup the wa_ctx */
+ if (!err)
+ err = i915_inject_probe_error(engine->i915, -ENODEV);
+
+err_unpin:
+ if (err)
+ i915_vma_unpin(wa_ctx->vma);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err) {
+ i915_vma_put(engine->wa_ctx.vma);
+
+ /* Clear all flags to prevent further use */
+ memset(wa_ctx, 0, sizeof(*wa_ctx));
+ }
+}
+
+static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ stats->runtime.num_underflow++;
+ stats->runtime.max_underflow =
+ max_t(u32, stats->runtime.max_underflow, -dt);
+#endif
+}
+
+static u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+void lrc_update_runtime(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+ u32 old;
+ s32 dt;
+
+ old = stats->runtime.last;
+ stats->runtime.last = lrc_get_runtime(ce);
+ dt = stats->runtime.last - old;
+ if (!dt)
+ return;
+
+ if (unlikely(dt < 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, stats->runtime.last, dt);
+ st_runtime_underflow(stats, dt);
+ return;
+ }
+
+ ewma_runtime_add(&stats->runtime.avg, dt);
+ stats->runtime.total += dt;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
new file mode 100644
index 000000000..a390f0813
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef __INTEL_LRC_H__
+#define __INTEL_LRC_H__
+
+#include "i915_priolist_types.h"
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "intel_context.h"
+
+struct drm_i915_gem_object;
+struct i915_gem_ww_ctx;
+struct intel_engine_cs;
+struct intel_ring;
+struct kref;
+
+/* At the start of the context image is its per-process HWS page */
+#define LRC_PPHWSP_PN (0)
+#define LRC_PPHWSP_SZ (1)
+/* After the PPHWSP we have the logical state for the context */
+#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)
+
+/* Space within PPHWSP reserved to be used as scratch */
+#define LRC_PPHWSP_SCRATCH 0x34
+#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
+
+void lrc_init_wa_ctx(struct intel_engine_cs *engine);
+void lrc_fini_wa_ctx(struct intel_engine_cs *engine);
+
+int lrc_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+void lrc_reset(struct intel_context *ce);
+void lrc_fini(struct intel_context *ce);
+void lrc_destroy(struct kref *kref);
+
+int
+lrc_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr);
+int
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
+
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
+
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
+
+void lrc_update_runtime(struct intel_context *ce);
+
+enum {
+ INTEL_ADVANCED_CONTEXT = 0,
+ INTEL_LEGACY_32B_CONTEXT,
+ INTEL_ADVANCED_AD_CONTEXT,
+ INTEL_LEGACY_64B_CONTEXT
+};
+
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
+
+#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN12_CTX_PRIORITY_MASK GENMASK(10, 9)
+#define GEN12_CTX_PRIORITY_HIGH FIELD_PREP(GEN12_CTX_PRIORITY_MASK, 2)
+#define GEN12_CTX_PRIORITY_NORMAL FIELD_PREP(GEN12_CTX_PRIORITY_MASK, 1)
+#define GEN12_CTX_PRIORITY_LOW FIELD_PREP(GEN12_CTX_PRIORITY_MASK, 0)
+#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
+#define GEN11_SW_CTX_ID_SHIFT 37
+#define GEN11_SW_CTX_ID_WIDTH 11
+#define GEN11_ENGINE_CLASS_SHIFT 61
+#define GEN11_ENGINE_CLASS_WIDTH 3
+#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define GEN11_ENGINE_INSTANCE_WIDTH 6
+#define XEHP_SW_CTX_ID_SHIFT 39
+#define XEHP_SW_CTX_ID_WIDTH 16
+#define XEHP_SW_COUNTER_SHIFT 58
+#define XEHP_SW_COUNTER_WIDTH 6
+
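+/*
+ * lrc_runtime_start/stop() bracket the period a context is active on the
+ * hardware: start records a timestamp in stats->active, and stop flushes the
+ * accumulated CTX_TIMESTAMP delta via lrc_update_runtime() before clearing it.
+ */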
+static inline void lrc_runtime_start(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ if (stats->active)
+ return;
+
+ WRITE_ONCE(stats->active, intel_context_clock());
+}
+
+static inline void lrc_runtime_stop(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (!stats->active)
+ return;
+
+ lrc_update_runtime(ce);
+ WRITE_ONCE(stats->active, 0);
+}
+
+#define DG2_PREDICATE_RESULT_WA (PAGE_SIZE - sizeof(u64))
+#define DG2_PREDICATE_RESULT_BB (2048)
+
+u32 lrc_indirect_bb(const struct intel_context *ce);
+
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
new file mode 100644
index 000000000..304000c7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_LRC_REG_H_
+#define _INTEL_LRC_REG_H_
+
+#include <linux/types.h>
+
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
+/* GEN8 to GEN12 Reg State Context */
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_START (0x08 + 1)
+#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_STATE (0x10 + 1)
+#define CTX_TIMESTAMP (0x22 + 1)
+#define CTX_PDP3_UDW (0x24 + 1)
+#define CTX_PDP3_LDW (0x26 + 1)
+#define CTX_PDP2_UDW (0x28 + 1)
+#define CTX_PDP2_LDW (0x2a + 1)
+#define CTX_PDP1_UDW (0x2c + 1)
+#define CTX_PDP1_LDW (0x2e + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_R_PWR_CLK_STATE (0x42 + 1)
+
+#define GEN9_CTX_RING_MI_MODE 0x54
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
+ u32 *reg_state__ = (reg_state); \
+ const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
+ (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
+} while (0)
+
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
+ u32 *reg_state__ = (reg_state); \
+ const u64 addr__ = px_dma((ppgtt)->pd); \
+ (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
+} while (0)
+
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
+#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
+#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
+#define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD
+
+#define GEN8_EXECLISTS_STATUS_BUF 0x370
+#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available,
+ * values 6 and 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+/* in Xe_HP ID 0xFFFF is reserved to indicate "invalid context" */
+#define XEHP_MAX_CONTEXT_HW_ID 0xFFFF
+
+#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
new file mode 100644
index 000000000..ee072c7d6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+#include "intel_migrate.h"
+#include "intel_ring.h"
+
+struct insert_pte_data {
+ u64 offset;
+};
+
+#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
+
+#define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \
+ DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
+static bool engine_supports_migration(struct intel_engine_cs *engine)
+{
+ if (!engine)
+ return false;
+
+ /*
+	 * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
+	 * the ability to write PTEs using inline data (MI_STORE_DATA)
+ * and of course the ability to do the block transfer (blits).
+ */
+ GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
+
+ return true;
+}
+
+static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
+{
+ struct insert_pte_data *d = data;
+
+ /*
+ * Insert a dummy PTE into every PT that will map to LMEM to ensure
+	 * we have a correctly set up PDE structure for later use.
+ */
+ vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM);
+ GEM_BUG_ON(!pt->is_compact);
+ d->offset += SZ_2M;
+}
+
+static void xehpsdv_insert_pte(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
+{
+ struct insert_pte_data *d = data;
+
+ /*
+ * We are playing tricks here, since the actual pt, from the hw
+	 * pov, is only 256 bytes with 32 entries, or 4096 bytes with 512
+ * entries, but we are still guaranteed that the physical
+ * alignment is 64K underneath for the pt, and we are careful
+ * not to access the space in the void.
+ */
+ vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM);
+ d->offset += SZ_64K;
+}
+
+static void insert_pte(struct i915_address_space *vm,
+ struct i915_page_table *pt,
+ void *data)
+{
+ struct insert_pte_data *d = data;
+
+ vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
+ i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
+ d->offset += PAGE_SIZE;
+}
+
+static struct i915_address_space *migrate_vm(struct intel_gt *gt)
+{
+ struct i915_vm_pt_stash stash = {};
+ struct i915_ppgtt *vm;
+ int err;
+ int i;
+
+ /*
+ * We construct a very special VM for use by all migration contexts,
+ * it is kept pinned so that it can be used at any time. As we need
+ * to pre-allocate the page directories for the migration VM, this
+ * limits us to only using a small number of prepared vma.
+ *
+ * To be able to pipeline and reschedule migration operations while
+ * avoiding unnecessary contention on the vm itself, the PTE updates
+ * are inline with the blits. All the blits use the same fixed
+ * addresses, with the backing store redirection being updated on the
+ * fly. Only 2 implicit vma are used for all migration operations.
+ *
+ * We lay the ppGTT out as:
+ *
+ * [0, CHUNK_SZ) -> first object
+ * [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
+ * [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
+ *
+ * By exposing the dma addresses of the page directories themselves
+ * within the ppGTT, we are then able to rewrite the PTE prior to use.
+ * But the PTE update and subsequent migration operation must be atomic,
+ * i.e. within the same non-preemptible window so that we do not switch
+ * to another migration context that overwrites the PTE.
+ *
+ * This changes quite a bit on platforms with HAS_64K_PAGES support,
+	 * where we instead have three windows, each CHUNK_SZ in size. The
+ * first is reserved for mapping system-memory, and that just uses the
+ * 512 entry layout using 4K GTT pages. The other two windows just map
+ * lmem pages and must use the new compact 32 entry layout using 64K GTT
+ * pages, which ensures we can address any lmem object that the user
+ * throws at us. We then also use the xehpsdv_toggle_pdes as a way of
+ * just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
+ * compact layout for each of these page-tables, that fall within the
+	 * [CHUNK_SZ, 3 * CHUNK_SZ) range.
+ *
+ * We lay the ppGTT out as:
+ *
+ * [0, CHUNK_SZ) -> first window/object, maps smem
+ * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
+ * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
+ *
+ * For the PTE window it's also quite different, since each PTE must
+	 * point to some 64K page, one for each PT (since it's in lmem), and yet
+	 * each is only <= 4096 bytes, but since the unused space within that PTE
+ * range is never touched, this should be fine.
+ *
+ * So basically each PT now needs 64K of virtual memory, instead of 4K,
+ * which looks like:
+ *
+ * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
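+ *
+ * As a worked example (assuming CHUNK_SZ is SZ_8M, per the 8MiB chunking
+ * noted below): the legacy layout uses 16M of VA for the two windows plus
+ * (16M >> 12) * 8 = 32K for the PTE window, while the HAS_64K_PAGES layout
+ * uses 24M for the three windows plus (24M / 2M) * 64K = 768K for the PTE
+ * window.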
+ */
+
+ vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
+ if (IS_ERR(vm))
+ return ERR_CAST(vm);
+
+ if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
+ err = -ENODEV;
+ goto err_vm;
+ }
+
+ if (HAS_64K_PAGES(gt->i915))
+ stash.pt_sz = I915_GTT_PAGE_SIZE_64K;
+
+ /*
+ * Each engine instance is assigned its own chunk in the VM, so
+ * that we can run multiple instances concurrently
+ */
+ for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+ struct intel_engine_cs *engine;
+ u64 base = (u64)i << 32;
+ struct insert_pte_data d = {};
+ struct i915_gem_ww_ctx ww;
+ u64 sz;
+
+ engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+ if (!engine_supports_migration(engine))
+ continue;
+
+ /*
+ * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
+ * 4x2 page directories for source/destination.
+ */
+ if (HAS_64K_PAGES(gt->i915))
+ sz = 3 * CHUNK_SZ;
+ else
+ sz = 2 * CHUNK_SZ;
+ d.offset = base + sz;
+
+ /*
+ * We need another page directory setup so that we can write
+ * the 8x512 PTE in each chunk.
+ */
+ if (HAS_64K_PAGES(gt->i915))
+ sz += (sz / SZ_2M) * SZ_64K;
+ else
+ sz += (sz >> 12) * sizeof(u64);
+
+ err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
+ if (err)
+ goto err_vm;
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_vm_lock_objects(&vm->vm, &ww);
+ if (err)
+ continue;
+ err = i915_vm_map_pt_stash(&vm->vm, &stash);
+ if (err)
+ continue;
+
+ vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
+ }
+ i915_vm_free_pt_stash(&vm->vm, &stash);
+ if (err)
+ goto err_vm;
+
+ /* Now allow the GPU to rewrite the PTE via its own ppGTT */
+ if (HAS_64K_PAGES(gt->i915)) {
+ vm->vm.foreach(&vm->vm, base, d.offset - base,
+ xehpsdv_insert_pte, &d);
+ d.offset = base + CHUNK_SZ;
+ vm->vm.foreach(&vm->vm,
+ d.offset,
+ 2 * CHUNK_SZ,
+ xehpsdv_toggle_pdes, &d);
+ } else {
+ vm->vm.foreach(&vm->vm, base, d.offset - base,
+ insert_pte, &d);
+ }
+ }
+
+ return &vm->vm;
+
+err_vm:
+ i915_vm_put(&vm->vm);
+ return ERR_PTR(err);
+}
+
+static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+ engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+ if (engine_supports_migration(engine))
+ return engine;
+ }
+
+ return NULL;
+}
+
+static struct intel_context *pinned_context(struct intel_gt *gt)
+{
+ static struct lock_class_key key;
+ struct intel_engine_cs *engine;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+
+ engine = first_copy_engine(gt);
+ if (!engine)
+ return ERR_PTR(-ENODEV);
+
+ vm = migrate_vm(gt);
+ if (IS_ERR(vm))
+ return ERR_CAST(vm);
+
+ ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
+ I915_GEM_HWS_MIGRATE,
+ &key, "migrate");
+ i915_vm_put(vm);
+ return ce;
+}
+
+int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
+{
+ struct intel_context *ce;
+
+ memset(m, 0, sizeof(*m));
+
+ ce = pinned_context(gt);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ m->context = ce;
+ return 0;
+}
+
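+/* Scale a random u32 into [0, max) with a widening multiply and shift. */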
+static int random_index(unsigned int max)
+{
+ return upper_32_bits(mul_u32_u32(get_random_u32(), max));
+}
+
+static struct intel_context *__migrate_engines(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
+ struct intel_engine_cs *engine;
+ unsigned int count, i;
+
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+ engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+ if (engine_supports_migration(engine))
+ engines[count++] = engine;
+ }
+
+ return intel_context_create(engines[random_index(count)]);
+}
+
+struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
+{
+ struct intel_context *ce;
+
+ /*
+ * We randomly distribute contexts across the engines upon construction,
+ * as they all share the same pinned vm, and so in order to allow
+ * multiple blits to run in parallel, we must construct each blit
+ * to use a different range of the vm for its GTT. This has to be
+ * known at construction, so we cannot use the late greedy load
+ * balancing of the virtual-engine.
+ */
+ ce = __migrate_engines(m->context->engine->gt);
+ if (IS_ERR(ce))
+ return ce;
+
+ ce->ring = NULL;
+ ce->ring_size = SZ_256K;
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(m->context->vm);
+
+ return ce;
+}
+
+static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
+{
+ dma_addr_t addr = sg_dma_address(sg);
+
+ return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
+}
+
+static int emit_no_arbitration(struct i915_request *rq)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Explicitly disable preemption for this request. */
+ *cs++ = MI_ARB_ON_OFF;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int max_pte_pkt_size(struct i915_request *rq, int pkt)
+{
+ struct intel_ring *ring = rq->ring;
+
+ pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
+ pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+ return pkt;
+}
+
+static int emit_pte(struct i915_request *rq,
+ struct sgt_dma *it,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ u64 offset,
+ int length)
+{
+ bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
+ const u64 encode = rq->context->vm->pte_encode(0, cache_level,
+ is_lmem ? PTE_LM : 0);
+ struct intel_ring *ring = rq->ring;
+ int pkt, dword_length;
+ u32 total = 0;
+ u32 page_size;
+ u32 *hdr, *cs;
+
+ GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
+
+ page_size = I915_GTT_PAGE_SIZE;
+ dword_length = 0x400;
+
+ /* Compute the page directory offset for the target address range */
+ if (has_64K_pages) {
+ GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));
+
+ offset /= SZ_2M;
+ offset *= SZ_64K;
+ offset += 3 * CHUNK_SZ;
+
+ if (is_lmem) {
+ page_size = I915_GTT_PAGE_SIZE_64K;
+ dword_length = 0x40;
+ }
+ } else {
+ offset >>= 12;
+ offset *= sizeof(u64);
+ offset += 2 * CHUNK_SZ;
+ }
+
+ offset += (u64)rq->engine->instance << 32;
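+ /*
+ * For example, with the legacy 4K layout and engine instance 0, a window
+ * base of CHUNK_SZ is redirected to 2 * CHUNK_SZ + (CHUNK_SZ >> 12) *
+ * sizeof(u64), i.e. the slot for that window's PTEs inside the PTE region.
+ */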
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Pack as many PTE updates as possible into a single MI command */
+ pkt = max_pte_pkt_size(rq, dword_length);
+
+ hdr = cs;
+ *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ do {
+ if (cs - hdr >= pkt) {
+ int dword_rem;
+
+ *hdr += cs - hdr - 2;
+ *cs++ = MI_NOOP;
+
+ ring->emit = (void *)cs - ring->vaddr;
+ intel_ring_advance(rq, cs);
+ intel_ring_update_space(ring);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
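+ /*
+ * With 64K GTT pages each 2M of the window is backed by its own
+ * 64K-aligned PT in the PTE region, so either jump the write
+ * offset to the next 64K boundary when a 2M boundary has just
+ * been completed, or cap this packet so that it ends exactly on
+ * the boundary.
+ */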
+ dword_rem = dword_length;
+ if (has_64K_pages) {
+ if (IS_ALIGNED(total, SZ_2M)) {
+ offset = round_up(offset, SZ_64K);
+ } else {
+ dword_rem = SZ_2M - (total & (SZ_2M - 1));
+ dword_rem /= page_size;
+ dword_rem *= 2;
+ }
+ }
+
+ pkt = max_pte_pkt_size(rq, dword_rem);
+
+ hdr = cs;
+ *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+
+ GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));
+
+ *cs++ = lower_32_bits(encode | it->dma);
+ *cs++ = upper_32_bits(encode | it->dma);
+
+ offset += 8;
+ total += page_size;
+
+ it->dma += page_size;
+ if (it->dma >= it->max) {
+ it->sg = __sg_next(it->sg);
+ if (!it->sg || sg_dma_len(it->sg) == 0)
+ break;
+
+ it->dma = sg_dma_address(it->sg);
+ it->max = it->dma + sg_dma_len(it->sg);
+ }
+ } while (total < length);
+
+ *hdr += cs - hdr - 2;
+ *cs++ = MI_NOOP;
+
+ ring->emit = (void *)cs - ring->vaddr;
+ intel_ring_advance(rq, cs);
+ intel_ring_update_space(ring);
+
+ return total;
+}
+
+static bool wa_1209644611_applies(int ver, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (ver != 11)
+ return false;
+
+ return height % 4 == 3 && height <= 8;
+}
+
+/**
+ * DOC: Flat-CCS - Memory compression for Local memory
+ *
+ * On Xe-HP and later devices, we use dedicated compression control state (CCS)
+ * stored in local memory for each surface, to support the 3D and media
+ * compression formats.
+ *
+ * The memory required for the CCS of the entire local memory is 1/256 of the
+ * local memory size. So before the kernel boot, the required memory is reserved
+ * for the CCS data and a secure register will be programmed with the CCS base
+ * address.
+ *
+ * Flat CCS data needs to be cleared when a lmem object is allocated.
+ * And CCS data can be copied in and out of CCS region through
+ * XY_CTRL_SURF_COPY_BLT. CPU can't access the CCS data directly.
+ *
+ * i915 supports Flat-CCS on lmem-only objects. When an object has smem in
+ * its preference list, on memory pressure, i915 needs to migrate the lmem
+ * content into smem. If the lmem object is Flat-CCS compressed by userspace,
+ * then i915 needs to decompress it. But i915 lacks the required information
+ * for such decompression. Hence i915 supports Flat-CCS only on lmem-only objects.
+ *
+ * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
+ * be temporarily evicted to smem, along with the auxiliary CCS state, where
+ * it can be potentially swapped-out at a later point, if required.
+ * If userspace later touches the evicted pages, then we always move
+ * the backing memory back to lmem, which includes restoring the saved CCS state,
+ * and potentially performing any required swap-in.
+ *
+ * For the migration of lmem objects with smem in the placement list, such as
+ * {lmem, smem}, the objects are treated as non-Flat-CCS-capable objects.
+ */
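+/*
+ * For scale: with the 1/256 ratio above, one CHUNK_SZ (8MiB) of lmem needs
+ * 32K of CCS, comfortably within the 256KB that a single
+ * XY_CTRL_SURF_COPY_BLT can move (see emit_copy_ccs() below).
+ */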
+
+static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
+{
+ *cmd++ = MI_FLUSH_DW | flags;
+ *cmd++ = 0;
+ *cmd++ = 0;
+
+ return cmd;
+}
+
+static int emit_copy_ccs(struct i915_request *rq,
+ u32 dst_offset, u8 dst_access,
+ u32 src_offset, u8 src_access, int size)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ u32 num_ccs_blks;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+
+ /*
+ * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
+ * data in and out of the CCS region.
+ *
+ * We can copy at most 1024 blocks of 256 bytes using one
+ * XY_CTRL_SURF_COPY_BLT instruction.
+ *
+ * In case we need to copy more than 1024 blocks, we need to add
+ * another instruction to the same batch buffer.
+ *
+ * 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS.
+ *
+ * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
+ */
+ *cs++ = XY_CTRL_SURF_COPY_BLT |
+ src_access << SRC_ACCESS_TYPE_SHIFT |
+ dst_access << DST_ACCESS_TYPE_SHIFT |
+ ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
+ *cs++ = src_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+ *cs++ = dst_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int emit_copy(struct i915_request *rq,
+ u32 dst_offset, u32 src_offset, int size)
+{
+ const int ver = GRAPHICS_VER(rq->engine->i915);
+ u32 instance = rq->engine->instance;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
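+ /*
+ * The copy is expressed as a 2D blit of PAGE_SIZE-byte rows,
+ * size >> PAGE_SHIFT rows tall, using the blit command appropriate
+ * for this generation.
+ */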
+ if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
+ *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+ *cs++ = BLT_DEPTH_32 | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = dst_offset;
+ *cs++ = instance;
+ *cs++ = 0;
+ *cs++ = PAGE_SIZE;
+ *cs++ = src_offset;
+ *cs++ = instance;
+ } else if (ver >= 8) {
+ *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = dst_offset;
+ *cs++ = instance;
+ *cs++ = 0;
+ *cs++ = PAGE_SIZE;
+ *cs++ = src_offset;
+ *cs++ = instance;
+ } else {
+ GEM_BUG_ON(instance);
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+ *cs++ = dst_offset;
+ *cs++ = PAGE_SIZE;
+ *cs++ = src_offset;
+ }
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static u64 scatter_list_length(struct scatterlist *sg)
+{
+ u64 len = 0;
+
+ while (sg && sg_dma_len(sg)) {
+ len += sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+
+ return len;
+}
+
+static int
+calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
+{
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms the smem object will have more pages than
+ * required for main memory, hence limit it to the
+ * required size for main memory.
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
+}
+
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
+{
+ u64 len;
+
+ do {
+ GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
+ len = it->max - it->dma;
+ if (len > bytes_to_cpy) {
+ it->dma += bytes_to_cpy;
+ break;
+ }
+
+ bytes_to_cpy -= len;
+
+ it->sg = __sg_next(it->sg);
+ it->dma = sg_dma_address(it->sg);
+ it->max = it->dma + sg_dma_len(it->sg);
+ } while (bytes_to_cpy);
+}
+
+int
+intel_context_migrate_copy(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *src,
+ enum i915_cache_level src_cache_level,
+ bool src_is_lmem,
+ struct scatterlist *dst,
+ enum i915_cache_level dst_cache_level,
+ bool dst_is_lmem,
+ struct i915_request **out)
+{
+ struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ enum i915_cache_level ccs_cache_level;
+ u32 src_offset, dst_offset;
+ u8 src_access, dst_access;
+ struct i915_request *rq;
+ u64 src_sz, dst_sz;
+ bool ccs_is_src, overwrite_ccs;
+ int err;
+
+ GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
+ *out = NULL;
+
+ GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+ src_sz = scatter_list_length(src);
+ bytes_to_cpy = src_sz;
+
+ if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
+ src_access = !src_is_lmem && dst_is_lmem;
+ dst_access = !src_access;
+
+ dst_sz = scatter_list_length(dst);
+ if (src_is_lmem) {
+ it_ccs = it_dst;
+ ccs_cache_level = dst_cache_level;
+ ccs_is_src = false;
+ } else if (dst_is_lmem) {
+ bytes_to_cpy = dst_sz;
+ it_ccs = it_src;
+ ccs_cache_level = src_cache_level;
+ ccs_is_src = true;
+ }
+
+ /*
+ * When CCS eviction is needed, smem will have the
+ * extra pages for the CCS data.
+ *
+ * TO-DO: We want to move the size mismatch check to a WARN_ON,
+ * but we still have some smem->lmem requests of the same size.
+ * Need to fix it.
+ */
+ ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
+ if (ccs_bytes_to_cpy)
+ get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
+ }
+
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
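+ /*
+ * Pick the VM windows set up in migrate_vm(): on legacy layouts src
+ * maps at 0 and dst at CHUNK_SZ; with 64K pages, smem uses window 0
+ * and lmem uses the second (src) and third (dst) windows.
+ */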
+ src_offset = 0;
+ dst_offset = CHUNK_SZ;
+ if (HAS_64K_PAGES(ce->engine->i915)) {
+ src_offset = 0;
+ dst_offset = 0;
+ if (src_is_lmem)
+ src_offset = CHUNK_SZ;
+ if (dst_is_lmem)
+ dst_offset = 2 * CHUNK_SZ;
+ }
+
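+ /*
+ * Each iteration emits one self-contained chunk: disable arbitration,
+ * rewrite the PTEs for the src and dst windows, flush, blit, and then
+ * handle any CCS data, before moving on to the next CHUNK_SZ of the
+ * objects.
+ */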
+ do {
+ int len;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
+ if (err)
+ goto out_rq;
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ deps = NULL;
+ }
+
+ /* The PTE updates + copy must not be interrupted. */
+ err = emit_no_arbitration(rq);
+ if (err)
+ goto out_rq;
+
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
+
+ len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
+ src_offset, src_sz);
+ if (!len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+ if (len < 0) {
+ err = len;
+ goto out_rq;
+ }
+
+ err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
+ dst_offset, len);
+ if (err < 0)
+ goto out_rq;
+ if (err < len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy(rq, dst_offset, src_offset, len);
+ if (err)
+ goto out_rq;
+
+ bytes_to_cpy -= len;
+
+ if (ccs_bytes_to_cpy) {
+ int ccs_sz;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ ccs_sz = GET_CCS_BYTES(i915, len);
+ err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
+ ccs_is_src ? src_offset : dst_offset,
+ ccs_sz);
+ if (err < 0)
+ goto out_rq;
+ if (err < ccs_sz) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy_ccs(rq, dst_offset, dst_access,
+ src_offset, src_access, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+ ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+ }
+
+ /* Arbitration is re-enabled between requests. */
+out_rq:
+ if (*out)
+ i915_request_put(*out);
+ *out = i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (err)
+ break;
+
+ if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
+ if (src_is_lmem)
+ WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
+ else
+ WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
+ break;
+ }
+
+ if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
+ !it_dst.sg || !sg_dma_len(it_dst.sg) ||
+ (ccs_bytes_to_cpy && (!it_ccs.sg ||
+ !sg_dma_len(it_ccs.sg))))) {
+ err = -EINVAL;
+ break;
+ }
+
+ cond_resched();
+ } while (1);
+
+out_ce:
+ return err;
+}
+
+static int emit_clear(struct i915_request *rq, u32 offset, int size,
+ u32 value, bool is_lmem)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ const int ver = GRAPHICS_VER(i915);
+ int ring_sz;
+ u32 *cs;
+
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (HAS_FLAT_CCS(i915) && ver >= 12)
+ ring_sz = XY_FAST_COLOR_BLT_DW;
+ else if (ver >= 8)
+ ring_sz = 8;
+ else
+ ring_sz = 6;
+
+ cs = intel_ring_begin(rq, ring_sz);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (HAS_FLAT_CCS(i915) && ver >= 12) {
+ *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
+ (XY_FAST_COLOR_BLT_DW - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
+ (PAGE_SIZE - 1);
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
+ *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
+ /* BG7 */
+ *cs++ = value;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG11 */
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG13 */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ } else if (ver >= 8) {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ } else {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = offset;
+ *cs++ = value;
+ }
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+int
+intel_context_migrate_clear(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ u32 value,
+ struct i915_request **out)
+{
+ struct drm_i915_private *i915 = ce->engine->i915;
+ struct sgt_dma it = sg_sgt(sg);
+ struct i915_request *rq;
+ u32 offset;
+ int err;
+
+ GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ *out = NULL;
+
+ GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+ offset = 0;
+ if (HAS_64K_PAGES(i915) && is_lmem)
+ offset = CHUNK_SZ;
+
+ do {
+ int len;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
+ if (err)
+ goto out_rq;
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ deps = NULL;
+ }
+
+ /* The PTE updates + clear must not be interrupted. */
+ err = emit_no_arbitration(rq);
+ if (err)
+ goto out_rq;
+
+ len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
+ if (len <= 0) {
+ err = len;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_clear(rq, offset, len, value, is_lmem);
+ if (err)
+ goto out_rq;
+
+ if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
+ /*
+ * Copy the (now zeroed) main memory content into the
+ * corresponding CCS surface so no stale compression
+ * state is left behind.
+ */
+ err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
+ DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+
+ /* Arbitration is re-enabled between requests. */
+out_rq:
+ if (*out)
+ i915_request_put(*out);
+ *out = i915_request_get(rq);
+ i915_request_add(rq);
+ if (err || !it.sg || !sg_dma_len(it.sg))
+ break;
+
+ cond_resched();
+ } while (1);
+
+out_ce:
+ return err;
+}
+
+int intel_migrate_copy(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *src,
+ enum i915_cache_level src_cache_level,
+ bool src_is_lmem,
+ struct scatterlist *dst,
+ enum i915_cache_level dst_cache_level,
+ bool dst_is_lmem,
+ struct i915_request **out)
+{
+ struct intel_context *ce;
+ int err;
+
+ *out = NULL;
+ if (!m->context)
+ return -ENODEV;
+
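+ /* Fall back to the shared pinned context if a fresh one cannot be created. */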
+ ce = intel_migrate_create_context(m);
+ if (IS_ERR(ce))
+ ce = intel_context_get(m->context);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ err = intel_context_pin_ww(ce, ww);
+ if (err)
+ goto out;
+
+ err = intel_context_migrate_copy(ce, deps,
+ src, src_cache_level, src_is_lmem,
+ dst, dst_cache_level, dst_is_lmem,
+ out);
+
+ intel_context_unpin(ce);
+out:
+ intel_context_put(ce);
+ return err;
+}
+
+int
+intel_migrate_clear(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ u32 value,
+ struct i915_request **out)
+{
+ struct intel_context *ce;
+ int err;
+
+ *out = NULL;
+ if (!m->context)
+ return -ENODEV;
+
+ ce = intel_migrate_create_context(m);
+ if (IS_ERR(ce))
+ ce = intel_context_get(m->context);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ err = intel_context_pin_ww(ce, ww);
+ if (err)
+ goto out;
+
+ err = intel_context_migrate_clear(ce, deps, sg, cache_level,
+ is_lmem, value, out);
+
+ intel_context_unpin(ce);
+out:
+ intel_context_put(ce);
+ return err;
+}
+
+void intel_migrate_fini(struct intel_migrate *m)
+{
+ struct intel_context *ce;
+
+ ce = fetch_and_zero(&m->context);
+ if (!ce)
+ return;
+
+ intel_engine_destroy_pinned_context(ce);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_migrate.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
new file mode 100644
index 000000000..ccc677ec4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_MIGRATE__
+#define __INTEL_MIGRATE__
+
+#include <linux/types.h>
+
+#include "intel_migrate_types.h"
+
+struct dma_fence;
+struct i915_deps;
+struct i915_request;
+struct i915_gem_ww_ctx;
+struct intel_gt;
+struct scatterlist;
+enum i915_cache_level;
+
+int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt);
+
+struct intel_context *intel_migrate_create_context(struct intel_migrate *m);
+
+int intel_migrate_copy(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *src,
+ enum i915_cache_level src_cache_level,
+ bool src_is_lmem,
+ struct scatterlist *dst,
+ enum i915_cache_level dst_cache_level,
+ bool dst_is_lmem,
+ struct i915_request **out);
+
+int intel_context_migrate_copy(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *src,
+ enum i915_cache_level src_cache_level,
+ bool src_is_lmem,
+ struct scatterlist *dst,
+ enum i915_cache_level dst_cache_level,
+ bool dst_is_lmem,
+ struct i915_request **out);
+
+int
+intel_migrate_clear(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ u32 value,
+ struct i915_request **out);
+int
+intel_context_migrate_clear(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ u32 value,
+ struct i915_request **out);
+
+void intel_migrate_fini(struct intel_migrate *m);
+
+#endif /* __INTEL_MIGRATE__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate_types.h b/drivers/gpu/drm/i915/gt/intel_migrate_types.h
new file mode 100644
index 000000000..d98230597
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_migrate_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_MIGRATE_TYPES__
+#define __INTEL_MIGRATE_TYPES__
+
+struct intel_context;
+
+struct intel_migrate {
+ struct intel_context *context;
+};
+
+#endif /* __INTEL_MIGRATE_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
new file mode 100644
index 000000000..152244d7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2015 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_mocs.h"
+#include "intel_ring.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+ u32 control_value;
+ u16 l3cc_value;
+ u16 used;
+};
+
+struct drm_i915_mocs_table {
+ unsigned int size;
+ unsigned int n_entries;
+ const struct drm_i915_mocs_entry *table;
+ u8 uc_index;
+ u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
+ u8 unused_entries_index;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define _LE_CACHEABILITY(value) ((value) << 0)
+#define _LE_TGT_CACHE(value) ((value) << 2)
+#define LE_LRUM(value) ((value) << 4)
+#define LE_AOM(value) ((value) << 6)
+#define LE_RSC(value) ((value) << 7)
+#define LE_SCC(value) ((value) << 8)
+#define LE_PFM(value) ((value) << 11)
+#define LE_SCF(value) ((value) << 14)
+#define LE_COS(value) ((value) << 15)
+#define LE_SSE(value) ((value) << 17)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value) ((value) << 0)
+#define L3_SCC(value) ((value) << 1)
+#define _L3_CACHEABILITY(value) ((value) << 4)
+#define L3_GLBGO(value) ((value) << 6)
+#define L3_LKUP(value) ((value) << 7)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define PVC_NUM_MOCS_ENTRIES 3
+
+/* (e)LLC caching options */
+/*
+ * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
+ * the same as LE_UC
+ */
+#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
+#define LE_1_UC _LE_CACHEABILITY(1)
+#define LE_2_WT _LE_CACHEABILITY(2)
+#define LE_3_WB _LE_CACHEABILITY(3)
+
+/* Target cache */
+#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0)
+#define LE_TC_1_LLC _LE_TGT_CACHE(1)
+#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2)
+#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3)
+
+/* L3 caching options */
+#define L3_0_DIRECT _L3_CACHEABILITY(0)
+#define L3_1_UC _L3_CACHEABILITY(1)
+#define L3_2_RESERVED _L3_CACHEABILITY(2)
+#define L3_3_WB _L3_CACHEABILITY(3)
+
+#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
+ [__idx] = { \
+ .control_value = __control_value, \
+ .l3cc_value = __l3cc_value, \
+ .used = 1, \
+ }
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * HW platforms, and for ICL+, be identical across OSes. To achieve
+ * that, for Icelake and above, the list of entries is published as part
+ * of the bspec.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon. For Gen < 12
+ * they will be initialized to PTE. Gen >= 12 has no setting for
+ * PTE, and those platforms, except TGL/RKL, will be initialized to L3 WB to
+ * catch accidental use of reserved and unused mocs indexes.
+ *
+ * The last few entries are reserved by the hardware. For ICL+ they
+ * should be initialized according to bspec and never used, for older
+ * platforms they should never be written to.
+ *
+ * NOTE1: These tables are part of bspec and defined as part of hardware
+ * interface for ICL+. For older platforms, they are part of kernel
+ * ABI. It is expected that, for specific hardware platform, existing
+ * entries will remain constant and the table will only be updated by
+ * adding new entries, filling unused positions.
+ *
+ * NOTE2: For GEN >= 12 except TGL and RKL, reserved and unspecified MOCS
+ * indices have been set to L3 WB. These reserved entries should never
+ * be used; they may be changed to lower-performance variants with better
+ * coherency in the future if more entries are needed.
+ * For TGL/RKL, all the unspecified MOCS indexes are mapped to L3 UC.
+ */
+#define GEN9_MOCS_ENTRIES \
+ MOCS_ENTRY(I915_MOCS_UNCACHED, \
+ LE_1_UC | LE_TC_2_LLC_ELLC, \
+ L3_1_UC), \
+ MOCS_ENTRY(I915_MOCS_PTE, \
+ LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \
+ L3_3_WB)
+
+static const struct drm_i915_mocs_entry skl_mocs_table[] = {
+ GEN9_MOCS_ENTRIES,
+ MOCS_ENTRY(I915_MOCS_CACHED,
+ LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+ L3_3_WB),
+
+ /*
+ * mocs:63
+ * - used by the L3 for all of its evictions.
+ * Thus it is expected to allow LLC cacheability to enable coherent
+ * flows to be maintained.
+ * - used to force L3 uncacheable cycles.
+ * Thus it is expected to make the surface L3 uncacheable.
+ */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
+};
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+ GEN9_MOCS_ENTRIES,
+ MOCS_ENTRY(I915_MOCS_CACHED,
+ LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+ L3_3_WB)
+};
+
+#define GEN11_MOCS_ENTRIES \
+ /* Entries 0 and 1 are defined per-platform */ \
+ /* Base - L3 + LLC */ \
+ MOCS_ENTRY(2, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_3_WB), \
+ /* Base - Uncached */ \
+ MOCS_ENTRY(3, \
+ LE_1_UC | LE_TC_1_LLC, \
+ L3_1_UC), \
+ /* Base - L3 */ \
+ MOCS_ENTRY(4, \
+ LE_1_UC | LE_TC_1_LLC, \
+ L3_3_WB), \
+ /* Base - LLC */ \
+ MOCS_ENTRY(5, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC), \
+ /* Age 0 - LLC */ \
+ MOCS_ENTRY(6, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+ L3_1_UC), \
+ /* Age 0 - L3 + LLC */ \
+ MOCS_ENTRY(7, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+ L3_3_WB), \
+ /* Age: Don't Chg. - LLC */ \
+ MOCS_ENTRY(8, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+ L3_1_UC), \
+ /* Age: Don't Chg. - L3 + LLC */ \
+ MOCS_ENTRY(9, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+ L3_3_WB), \
+ /* No AOM - LLC */ \
+ MOCS_ENTRY(10, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM - L3 + LLC */ \
+ MOCS_ENTRY(11, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+ L3_3_WB), \
+ /* No AOM; Age 0 - LLC */ \
+ MOCS_ENTRY(12, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM; Age 0 - L3 + LLC */ \
+ MOCS_ENTRY(13, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+ L3_3_WB), \
+ /* No AOM; Age:DC - LLC */ \
+ MOCS_ENTRY(14, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM; Age:DC - L3 + LLC */ \
+ MOCS_ENTRY(15, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+ L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
+ /* Self-Snoop - L3 + LLC */ \
+ MOCS_ENTRY(18, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(12.5%) */ \
+ MOCS_ENTRY(19, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(25%) */ \
+ MOCS_ENTRY(20, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(50%) */ \
+ MOCS_ENTRY(21, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(75%) */ \
+ MOCS_ENTRY(22, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(87.5%) */ \
+ MOCS_ENTRY(23, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \
+ L3_3_WB), \
+ /* HW Reserved - SW program but never use */ \
+ MOCS_ENTRY(62, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC), \
+ /* HW Reserved - SW program but never use */ \
+ MOCS_ENTRY(63, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC)
+
+static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
+ /*
+ * NOTE:
+ * Reserved and unspecified MOCS indices have been set to (L3 + LLC).
+ * These reserved entries should never be used; they may be changed
+ * to lower-performance variants with better coherency in the future if
+ * more entries are needed. We are programming only index I915_MOCS_PTE(1);
+ * __init_mocs_table() takes care of programming the unused indices with
+ * this entry.
+ */
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
+ L3_1_UC),
+ GEN11_MOCS_ENTRIES,
+
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+};
+
+static const struct drm_i915_mocs_entry icl_mocs_table[] = {
+ /* Base - Uncached (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_UNCACHED,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* Base - L3 + LeCC:PAT (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
+ L3_3_WB),
+
+ GEN11_MOCS_ENTRIES
+};
+
+static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
+
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+ /* WB - L3 */
+ MOCS_ENTRY(5, 0, L3_3_WB),
+ /* WB - L3 50% */
+ MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
+ /* WB - L3 25% */
+ MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
+ /* WB - L3 12.5% */
+ MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
+
+ /* HDC:L1 + L3 */
+ MOCS_ENTRY(48, 0, L3_3_WB),
+ /* HDC:L1 */
+ MOCS_ENTRY(49, 0, L3_1_UC),
+
+ /* HW Reserved */
+ MOCS_ENTRY(60, 0, L3_1_UC),
+ MOCS_ENTRY(61, 0, L3_1_UC),
+ MOCS_ENTRY(62, 0, L3_1_UC),
+ MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
+static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
+ GEN11_MOCS_ENTRIES,
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+};
+
+static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
+ /* wa_1608975824 */
+ MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
+ /* UC - Non-Coherent; GO:L3 */
+ MOCS_ENTRY(4, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* HW Reserved - SW program but never use. */
+ MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
+ MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
+ MOCS_ENTRY(60, 0, L3_1_UC),
+ MOCS_ENTRY(61, 0, L3_1_UC),
+ MOCS_ENTRY(62, 0, L3_1_UC),
+ MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
+ /* Wa_14011441408: Set Go to Memory for MOCS#0 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
+ /* Error */
+ MOCS_ENTRY(0, 0, L3_3_WB),
+
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(2, 0, L3_3_WB),
+};
+
+enum {
+ HAS_GLOBAL_MOCS = BIT(0),
+ HAS_ENGINE_MOCS = BIT(1),
+ HAS_RENDER_L3CC = BIT(2),
+};
+
+static bool has_l3cc(const struct drm_i915_private *i915)
+{
+ return true;
+}
+
+static bool has_global_mocs(const struct drm_i915_private *i915)
+{
+ return HAS_GLOBAL_MOCS_REGISTERS(i915);
+}
+
+static bool has_mocs(const struct drm_i915_private *i915)
+{
+ return !IS_DGFX(i915);
+}
+
+static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ struct drm_i915_mocs_table *table)
+{
+ unsigned int flags;
+
+ memset(table, 0, sizeof(struct drm_i915_mocs_table));
+
+ table->unused_entries_index = I915_MOCS_PTE;
+ if (IS_PONTEVECCHIO(i915)) {
+ table->size = ARRAY_SIZE(pvc_mocs_table);
+ table->table = pvc_mocs_table;
+ table->n_entries = PVC_NUM_MOCS_ENTRIES;
+ table->uc_index = 1;
+ table->wb_index = 2;
+ table->unused_entries_index = 2;
+ } else if (IS_DG2(i915)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
+ table->table = dg2_mocs_table_g10_ax;
+ } else {
+ table->size = ARRAY_SIZE(dg2_mocs_table);
+ table->table = dg2_mocs_table;
+ }
+ table->uc_index = 1;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 3;
+ } else if (IS_XEHPSDV(i915)) {
+ table->size = ARRAY_SIZE(xehpsdv_mocs_table);
+ table->table = xehpsdv_mocs_table;
+ table->uc_index = 2;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 5;
+ } else if (IS_DG1(i915)) {
+ table->size = ARRAY_SIZE(dg1_mocs_table);
+ table->table = dg1_mocs_table;
+ table->uc_index = 1;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 1;
+ table->unused_entries_index = 5;
+ } else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ /* For TGL/RKL, the table can't be changed now for ABI reasons */
+ table->size = ARRAY_SIZE(tgl_mocs_table);
+ table->table = tgl_mocs_table;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ table->size = ARRAY_SIZE(gen12_mocs_table);
+ table->table = gen12_mocs_table;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ table->unused_entries_index = 2;
+ } else if (GRAPHICS_VER(i915) == 11) {
+ table->size = ARRAY_SIZE(icl_mocs_table);
+ table->table = icl_mocs_table;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ } else if (IS_GEN9_BC(i915)) {
+ table->size = ARRAY_SIZE(skl_mocs_table);
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->table = skl_mocs_table;
+ } else if (IS_GEN9_LP(i915)) {
+ table->size = ARRAY_SIZE(broxton_mocs_table);
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->table = broxton_mocs_table;
+ } else {
+ drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ return 0;
+ }
+
+ if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
+ return 0;
+
+ /* WaDisableSkipCaching:skl,bxt,kbl,glk */
+ if (GRAPHICS_VER(i915) == 9) {
+ int i;
+
+ for (i = 0; i < table->size; i++)
+ if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
+ return 0;
+ }
+
+ flags = 0;
+ if (has_mocs(i915)) {
+ if (has_global_mocs(i915))
+ flags |= HAS_GLOBAL_MOCS;
+ else
+ flags |= HAS_ENGINE_MOCS;
+ }
+ if (has_l3cc(i915))
+ flags |= HAS_RENDER_L3CC;
+
+ return flags;
+}
+
+/*
+ * Get control_value from the MOCS entry. If the entry is not used, return
+ * the control_value of the entry at unused_entries_index (which defaults
+ * to I915_MOCS_PTE).
+ */
+static u32 get_entry_control(const struct drm_i915_mocs_table *table,
+ unsigned int index)
+{
+ if (index < table->size && table->table[index].used)
+ return table->table[index].control_value;
+ return table->table[table->unused_entries_index].control_value;
+}
+
+#define for_each_mocs(mocs, t, i) \
+ for (i = 0; \
+ i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
+ i++)
+
+static void __init_mocs_table(struct intel_uncore *uncore,
+ const struct drm_i915_mocs_table *table,
+ u32 addr)
+{
+ unsigned int i;
+ u32 mocs;
+
+ drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
+ "Unused entries index should have been defined\n");
+ for_each_mocs(mocs, table, i)
+ intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
+}
+
+static u32 mocs_offset(const struct intel_engine_cs *engine)
+{
+ static const u32 offset[] = {
+ [RCS0] = __GEN9_RCS0_MOCS0,
+ [VCS0] = __GEN9_VCS0_MOCS0,
+ [VCS1] = __GEN9_VCS1_MOCS0,
+ [VECS0] = __GEN9_VECS0_MOCS0,
+ [BCS0] = __GEN9_BCS0_MOCS0,
+ [VCS2] = __GEN11_VCS2_MOCS0,
+ };
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
+ return offset[engine->id];
+}
+
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
+{
+ __init_mocs_table(engine->uncore, table, mocs_offset(engine));
+}
+
+/*
+ * Get l3cc_value from the MOCS entry. If the entry is not used, return
+ * the l3cc_value of the entry at unused_entries_index (which defaults
+ * to I915_MOCS_PTE).
+ */
+static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
+ unsigned int index)
+{
+ if (index < table->size && table->table[index].used)
+ return table->table[index].l3cc_value;
+ return table->table[table->unused_entries_index].l3cc_value;
+}
+
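+/*
+ * Each 32-bit LNCFCMOCS register holds two MOCS entries: the even index in
+ * bits 15:0 and the odd index in bits 31:16.
+ */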
+static u32 l3cc_combine(u16 low, u16 high)
+{
+ return low | (u32)high << 16;
+}
+
+#define for_each_l3cc(l3cc, t, i) \
+ for (i = 0; \
+ i < ((t)->n_entries + 1) / 2 ? \
+ (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
+ get_entry_l3cc((t), 2 * i + 1))), 1 : \
+ 0; \
+ i++)
+
+static void init_l3cc_table(struct intel_uncore *uncore,
+ const struct drm_i915_mocs_table *table)
+{
+ unsigned int i;
+ u32 l3cc;
+
+ for_each_l3cc(l3cc, table, i)
+ intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc);
+}
+
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_mocs_table table;
+ unsigned int flags;
+
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ flags = get_mocs_settings(engine->i915, &table);
+ if (!flags)
+ return;
+
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (flags & HAS_ENGINE_MOCS)
+ init_mocs_table(engine, &table);
+
+ if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
+ init_l3cc_table(engine->uncore, &table);
+}
+
+static u32 global_mocs_offset(void)
+{
+ return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
+}
+
+void intel_set_mocs_index(struct intel_gt *gt)
+{
+ struct drm_i915_mocs_table table;
+
+ get_mocs_settings(gt->i915, &table);
+ gt->mocs.uc_index = table.uc_index;
+ if (HAS_L3_CCS_READ(gt->i915))
+ gt->mocs.wb_index = table.wb_index;
+}
+
+void intel_mocs_init(struct intel_gt *gt)
+{
+ struct drm_i915_mocs_table table;
+ unsigned int flags;
+
+ /*
+ * LLC and eDRAM control values are not applicable to dgfx
+ */
+ flags = get_mocs_settings(gt->i915, &table);
+ if (flags & HAS_GLOBAL_MOCS)
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
+
+ /*
+ * Initialize the L3CC table as part of MOCS initialization to make
+ * sure the LNCFCMOCSx registers are programmed for the subsequent
+ * memory transactions, including GuC transactions.
+ */
+ if (flags & HAS_RENDER_L3CC)
+ init_l3cc_table(gt->uncore, &table);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
new file mode 100644
index 000000000..76db82721
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2015 Intel Corporation
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous Gens the MOCS settings were values that were set by userland as
+ * part of the batch. In Gen9 this has changed to be a single table (per ring)
+ * that all batches now reference by index instead of programming the MOCS
+ * directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables are included
+ * in context (The GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
+ * registers). The rest are not (the settings for the other rings).
+ *
+ * This table needs to be set at system start-up because of the way the table
+ * interacts with the contexts and the GmmLib interface.
+ *
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+struct intel_engine_cs;
+struct intel_gt;
+
+void intel_mocs_init(struct intel_gt *gt);
+void intel_mocs_init_engine(struct intel_engine_cs *engine);
+void intel_set_mocs_index(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
new file mode 100644
index 000000000..7ecfa672f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_trace.h"
+#include "intel_gtt.h"
+#include "gen6_ppgtt.h"
+#include "gen8_ppgtt.h"
+
+struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
+{
+ struct i915_page_table *pt;
+
+ pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pt))
+ return ERR_PTR(-ENOMEM);
+
+ pt->base = vm->alloc_pt_dma(vm, sz);
+ if (IS_ERR(pt->base)) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pt->is_compact = false;
+ atomic_set(&pt->used, 0);
+ return pt;
+}
+
+struct i915_page_directory *__alloc_pd(int count)
+{
+ struct i915_page_directory *pd;
+
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd))
+ return NULL;
+
+ pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd->entry)) {
+ kfree(pd);
+ return NULL;
+ }
+
+ spin_lock_init(&pd->lock);
+ return pd;
+}
+
+struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+{
+ struct i915_page_directory *pd;
+
+ pd = __alloc_pd(I915_PDES);
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
+ kfree(pd->entry);
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pd;
+}
+
+void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
+{
+ BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));
+
+ if (lvl) {
+ struct i915_page_directory *pd =
+ container_of(pt, typeof(*pd), pt);
+ kfree(pd->entry);
+ }
+
+ if (pt->base)
+ i915_gem_object_put(pt->base);
+
+ kfree(pt);
+}
+
+static void
+write_dma_entry(struct drm_i915_gem_object * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
+{
+ u64 * const vaddr = __px_vaddr(pdma);
+
+ vaddr[idx] = encoded_entry;
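+ /* Flush the CPU write so the GPU sees the updated entry when it walks the tables. */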
+ drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
+}
+
+void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
+}
+
+void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct drm_i915_gem_object * const scratch)
+{
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
+}
+
+bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct drm_i915_gem_object * const scratch)
+{
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+
+ return free;
+}
+
+int i915_ppgtt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gtt_write_workarounds(gt);
+
+ if (GRAPHICS_VER(i915) == 6)
+ gen6_ppgtt_enable(gt);
+ else if (GRAPHICS_VER(i915) == 7)
+ gen7_ppgtt_enable(gt);
+
+ return 0;
+}
+
+static struct i915_ppgtt *
+__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
+{
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return gen6_ppgtt_create(gt);
+ else
+ return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
+}
+
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
+ if (IS_ERR(ppgtt))
+ return ppgtt;
+
+ trace_i915_ppgtt_create(&ppgtt->vm);
+
+ return ppgtt;
+}
+
+void ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ if (!vma_res->allocated) {
+ vm->allocate_va_range(vm, stash, vma_res->start,
+ vma_res->vma_size);
+ vma_res->allocated = true;
+ }
+
+ /* Applicable to VLV, and gen8+ */
+ pte_flags = 0;
+ if (vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+ if (vma_res->bi.lmem)
+ pte_flags |= PTE_LM;
+
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ wmb();
+}
+
+void ppgtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
+}
+
+static unsigned long pd_count(u64 size, int shift)
+{
+ /* Beware later misalignment: reserve one extra entry for an unaligned start */
+ return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
+}
+
+int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 size)
+{
+ unsigned long count;
+ int shift, n, pt_sz;
+
+ shift = vm->pd_shift;
+ if (!shift)
+ return 0;
+
+ pt_sz = stash->pt_sz;
+ if (!pt_sz)
+ pt_sz = I915_GTT_PAGE_SIZE_4K;
+ else
+ GEM_BUG_ON(!IS_DGFX(vm->i915));
+
+ GEM_BUG_ON(!is_power_of_2(pt_sz));
+
+ count = pd_count(size, shift);
+ while (count--) {
+ struct i915_page_table *pt;
+
+ pt = alloc_pt(vm, pt_sz);
+ if (IS_ERR(pt)) {
+ i915_vm_free_pt_stash(vm, stash);
+ return PTR_ERR(pt);
+ }
+
+ pt->stash = stash->pt[0];
+ stash->pt[0] = pt;
+ }
+
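+ /* Upper levels of the tree need page directories rather than page tables. */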
+ for (n = 1; n < vm->top; n++) {
+ shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
+ count = pd_count(size, shift);
+ while (count--) {
+ struct i915_page_directory *pd;
+
+ pd = alloc_pd(vm);
+ if (IS_ERR(pd)) {
+ i915_vm_free_pt_stash(vm, stash);
+ return PTR_ERR(pd);
+ }
+
+ pd->pt.stash = stash->pt[1];
+ stash->pt[1] = &pd->pt;
+ }
+ }
+
+ return 0;
+}
+
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash)
+{
+ struct i915_page_table *pt;
+ int n, err;
+
+ for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+ for (pt = stash->pt[n]; pt; pt = pt->stash) {
+ err = map_pt_dma_locked(vm, pt->base);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void i915_vm_free_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash)
+{
+ struct i915_page_table *pt;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+ while ((pt = stash->pt[n])) {
+ stash->pt[n] = pt->stash;
+ free_px(vm, pt, n);
+ }
+ }
+}
+
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = i915->drm.dev;
+ ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
+ ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
+
+ dma_resv_init(&ppgtt->vm._resv);
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000..f8d0523f4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/string_helpers.h>
+
+#include "gem/i915_gem_region.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_vgpu.h"
+#include "intel_engine_regs.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_regs.h"
+#include "intel_pcode.h"
+#include "intel_rc6.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V while in this stage.
+ * This stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled, and as soon as a new workload arises the GPU wakes up
+ * automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in the
+ * latency required to enter and leave RC6 and in the voltage consumed by the
+ * GPU in different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
+ * one which brings the most power savings; deeper states save more power,
+ * but require higher latency to switch to and wake up.
+ */
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+ return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->i915;
+}
+
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_gt *gt = rc6_to_gt(rc6);
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 pg_enable;
+ int i;
+
+ /*
+ * With GuCRC, these parameters are set by GuC
+ */
+ if (!intel_uc_uses_guc_rc(&gt->uc)) {
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ }
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+	 * do not want the enable hysteresis to be less than the wakeup latency.
+	 *
+	 * igt/gem_exec_nop/sequential provides a rough estimate for the
+	 * service latency, and puts it under 10us for Icelake, similar to
+	 * Broadwell+. To be conservative, we want to factor in a context
+	 * switch on top (due to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
+
+ /* 3a: Enable RC6
+ *
+ * With GuCRC, we do not enable bit 31 of RC_CTL,
+ * thus allowing GuC to control RC6 entry/exit fully instead.
+ * We will not set the HW ENABLE and EI bits
+ */
+ if (!intel_guc_rc_enable(&gt->uc.guc))
+ rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
+ else
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1);
+
+ /* Wa_16011777198 - Render powergating must remain disabled */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ pg_enable =
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+ else
+ pg_enable =
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+
+ if (GRAPHICS_VER(gt->i915) >= 12) {
+ for (i = 0; i < I915_MAX_VCS; i++)
+ if (HAS_ENGINE(gt, _VCS(i)))
+ pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i));
+ }
+
+ set(uncore, GEN9_PG_ENABLE, pg_enable);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 11) {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ } else {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+	 * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+	 * big cores) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1);
+
+ /*
+ * WaRsDisableCoarsePowerGating:skl,cnl
+ * - Render/Media PG need to be disabled with RC6.
+ */
+ if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+ /* 3: Enable RC6 */
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE;
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6vids, rc6_mask;
+ int ret;
+
+ set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* We don't use those on Haswell */
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ rc6->ctl_enable =
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE;
+
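+	/*
+	 * On gen6, check the BIOS-programmed minimum RC6 voltage via pcode
+	 * and raise it to 450mV if it was set too low.
+	 */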
+ rc6vids = 0;
+ ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ if (GRAPHICS_VER(i915) == 6 && ret) {
+ drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
+ } else if (GRAPHICS_VER(i915) == 6 &&
+ (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ drm_dbg(&i915->drm,
+ "You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ drm_err(&i915->drm,
+ "Couldn't fix incorrect rc6 voltage\n");
+ }
+}
+
+/* Check that the pcbr address is not empty. */
+static int chv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
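+		/* Place the 32KiB power context at the top of stolen (DSM), 4KiB aligned. */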
+ paddr = i915->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
+
+ pctx_paddr = (paddr & ~4095);
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+ }
+
+ return 0;
+}
+
+static int vlv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_gem_object *pctx;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ resource_size_t pcbr_offset;
+
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
+ pcbr_offset,
+ pctx_size,
+ 0);
+ if (IS_ERR(pctx))
+ return PTR_ERR(pctx);
+
+ goto out;
+ }
+
+ drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+	 * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(i915, pctx_size);
+ if (IS_ERR(pctx)) {
+ drm_dbg(&i915->drm,
+ "not enough stolen space for PCTX, disabling\n");
+ return PTR_ERR(pctx);
+ }
+
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+ rc6->pctx = pctx;
+ return 0;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2a: Program RC6 thresholds.*/
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /* TO threshold set to 500 us (0x186 * 1.28 us) */
+ set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* 3: Enable RC6 */
+ rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ rc6->ctl_enable =
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
+}
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+ bool enable_rc6 = true;
+
+ rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+ rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+ rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+ drm_dbg(&i915->drm, "BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ str_on_off(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ str_on_off(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
+
+ if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base =
+ intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT(RENDER_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT(GEN6_BSD_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT(BLT_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT(VEBOX_RING_BASE)) & IDLE_TIME_MASK) > 1)) {
+ drm_dbg(&i915->drm,
+ "Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+ drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+ drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+ drm_dbg(&i915->drm, "GPM control not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
+static bool rc6_supported(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!HAS_RC6(i915))
+ return false;
+
+ if (intel_vgpu_active(i915))
+ return false;
+
+ if (is_mock_gt(rc6_to_gt(rc6)))
+ return false;
+
+ if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+ drm_notice(&i915->drm,
+ "RC6 and powersaving disabled by BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void rpm_get(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(rc6->wakeref);
+ pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
+ rc6->wakeref = true;
+}
+
+static void rpm_put(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(!rc6->wakeref);
+ pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
+ rc6->wakeref = false;
+}
+
+static bool pctx_corrupted(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return false;
+
+ if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
+ return false;
+
+ drm_notice(&i915->drm,
+ "RC6 context corruption, disabling runtime power management\n");
+ return true;
+}
+
+static void __intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
+
+ /* Take control of RC6 back from GuC */
+ intel_guc_rc_disable(&gt->uc.guc);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (GRAPHICS_VER(i915) >= 9)
+ set(uncore, GEN9_PG_ENABLE, 0);
+ set(uncore, GEN6_RC_CONTROL, 0);
+ set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ int err;
+
+ /* Disable runtime-pm until we can save the GPU state with rc6 pctx */
+ rpm_get(rc6);
+
+ if (!rc6_supported(rc6))
+ return;
+
+ if (IS_CHERRYVIEW(i915))
+ err = chv_rc6_init(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ err = vlv_rc6_init(rc6);
+ else
+ err = 0;
+
+ /* Sanitize rc6, ensure it is disabled before we are ready. */
+ __intel_rc6_disable(rc6);
+
+ rc6->supported = err == 0;
+}
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6)
+{
+ memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
+
+ if (rc6->enabled) { /* unbalanced suspend/resume */
+ rpm_get(rc6);
+ rc6->enabled = false;
+ }
+
+ if (rc6->supported)
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->supported)
+ return;
+
+ GEM_BUG_ON(rc6->enabled);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rc6_enable(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rc6_enable(rc6);
+ else if (GRAPHICS_VER(i915) >= 11)
+ gen11_rc6_enable(rc6);
+ else if (GRAPHICS_VER(i915) >= 9)
+ gen9_rc6_enable(rc6);
+ else if (IS_BROADWELL(i915))
+ gen8_rc6_enable(rc6);
+ else if (GRAPHICS_VER(i915) >= 6)
+ gen6_rc6_enable(rc6);
+
+ rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
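+	/*
+	 * With the RC6 context corruption workaround we never let the HW
+	 * enter RC6 on its own; instead RC6 is entered manually on park
+	 * (see intel_rc6_park()).
+	 */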
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ rc6->ctl_enable = 0;
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (unlikely(pctx_corrupted(rc6)))
+ return;
+
+ /* rc6 is ready, runtime-pm is go! */
+ rpm_put(rc6);
+ rc6->enabled = true;
+}
+
+void intel_rc6_unpark(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ /* Restore HW timers for automatic RC6 entry while busy */
+ set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+}
+
+void intel_rc6_park(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ unsigned int target;
+
+ if (!rc6->enabled)
+ return;
+
+ if (unlikely(pctx_corrupted(rc6))) {
+ intel_rc6_disable(rc6);
+ return;
+ }
+
+ if (!rc6->manual)
+ return;
+
+ /* Turn off the HW timers and go directly to rc6 */
+ set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
+
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ target = 0x6; /* deepest rc6 */
+ else if (HAS_RC6p(rc6_to_i915(rc6)))
+ target = 0x5; /* deep rc6 */
+ else
+ target = 0x4; /* normal rc6 */
+ set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ if (!rc6->enabled)
+ return;
+
+ rpm_get(rc6);
+ rc6->enabled = false;
+
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+ struct drm_i915_gem_object *pctx;
+
+ intel_rc6_disable(rc6);
+
+ pctx = fetch_and_zero(&rc6->pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
+
+ if (rc6->wakeref)
+ rpm_put(rc6);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+ int loop = 2;
+
+ /*
+	 * The registers accessed do not need forcewake. We borrow the
+	 * uncore lock to prevent concurrent access to the range register.
+ */
+ lockdep_assert_held(&uncore->lock);
+
+ /*
+ * vlv and chv residency counters are 40 bits in width.
+ * With a control bit, we can choose between upper or lower
+ * 32bit window into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ do {
+ tmp = upper;
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = intel_uncore_read_fw(uncore, reg);
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ } while (upper != tmp && --loop);
+
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
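+	/*
+	 * The two 32-bit windows overlap: the high window returns bits 39:8
+	 * of the 40-bit counter, hence the shift by 8 when recombining it
+	 * with the low 32 bits.
+	 */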
+ return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
+
+ if (!rc6->supported)
+ return 0;
+
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other so we can use the relative address, compared to the smallest
+ * one as the index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ mul = 1000000;
+ div = i915->czclk_freq;
+ overflow_hw = BIT_ULL(40);
+ time_hw = vlv_residency_raw(uncore, reg);
+ } else {
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(i915)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
+
+ overflow_hw = BIT_ULL(32);
+ time_hw = intel_uncore_read_fw(uncore, reg);
+ }
+
+ /*
+ * Counter wrap handling.
+ *
+	 * This relies on a sufficient frequency of queries; otherwise the
+	 * counters can still wrap undetected.
+ */
+ prev_hw = rc6->prev_hw_residency[i];
+ rc6->prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += rc6->cur_residency[i];
+ rc6->cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
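+	/* Scale the accumulated raw residency to nanoseconds: ns = ticks * mul / div. */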
+ return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rc6.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000..b6fea71af
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg_defs.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_unpark(struct intel_rc6 *rc6);
+void intel_rc6_park(struct intel_rc6 *rc6);
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6);
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000..e747492b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+
+ u32 ctl_enable;
+
+ struct drm_i915_gem_object *pctx;
+
+ bool supported : 1;
+ bool enabled : 1;
+ bool manual : 1;
+ bool wakeref : 1;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
new file mode 100644
index 000000000..f3ad93db0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_pci.h"
+#include "i915_reg.h"
+#include "intel_memory_region.h"
+#include "intel_pci_config.h"
+#include "intel_region_lmem.h"
+#include "intel_region_ttm.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_regs.h"
+
+#ifdef CONFIG_64BIT
+static void _release_bars(struct pci_dev *pdev)
+{
+ int resno;
+
+ for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
+ if (pci_resource_len(pdev, resno))
+ pci_release_resource(pdev, resno);
+ }
+}
+
+static void
+_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ int bar_size = pci_rebar_bytes_to_size(size);
+ int ret;
+
+ _release_bars(pdev);
+
+ ret = pci_resize_resource(pdev, resno, bar_size);
+ if (ret) {
+ drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
+ resno, 1 << bar_size, ERR_PTR(ret));
+ return;
+ }
+
+ drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
+}
+
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_bus *root = pdev->bus;
+ struct resource *root_res;
+ resource_size_t rebar_size;
+ resource_size_t current_size;
+ u32 pci_cmd;
+ int i;
+
+ current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
+
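+	/*
+	 * Pick the target BAR size: either the size requested via the
+	 * lmem_bar_size modparam (validated against the sizes the device
+	 * advertises), or by default the full lmem size if the current BAR
+	 * does not already cover it.
+	 */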
+ if (i915->params.lmem_bar_size) {
+ u32 bar_sizes;
+
+ rebar_size = i915->params.lmem_bar_size *
+ (resource_size_t)SZ_1M;
+ bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
+
+ if (rebar_size == current_size)
+ return;
+
+ if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
+ rebar_size >= roundup_pow_of_two(lmem_size)) {
+ rebar_size = lmem_size;
+
+ drm_info(&i915->drm,
+ "Given bar size is not within supported size, setting it to default: %llu\n",
+ (u64)lmem_size >> 20);
+ }
+ } else {
+ rebar_size = current_size;
+
+ if (rebar_size != roundup_pow_of_two(lmem_size))
+ rebar_size = lmem_size;
+ else
+ return;
+ }
+
+ /* Find out if root bus contains 64bit memory addressing */
+ while (root->parent)
+ root = root->parent;
+
+ pci_bus_for_each_resource(root, root_res, i) {
+ if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ root_res->start > 0x100000000ull)
+ break;
+ }
+
+	/* pci_resize_resource will fail anyway */
+ if (!root_res) {
+ drm_info(&i915->drm, "Can't resize LMEM BAR - platform support is missing\n");
+ return;
+ }
+
+ /* First disable PCI memory decoding references */
+ pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_dword(pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_MEMORY);
+
+ _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
+}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
+
+static int
+region_lmem_release(struct intel_memory_region *mem)
+{
+ int ret;
+
+ ret = intel_region_ttm_fini(mem);
+ io_mapping_fini(&mem->iomap);
+
+ return ret;
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+ int ret;
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ mem->io_size))
+ return -EIO;
+
+ ret = intel_region_ttm_init(mem);
+ if (ret)
+ goto out_no_buddy;
+
+ return 0;
+
+out_no_buddy:
+ io_mapping_fini(&mem->iomap);
+
+ return ret;
+}
+
+static const struct intel_memory_region_ops intel_region_lmem_ops = {
+ .init = region_lmem_init,
+ .release = region_lmem_release,
+ .init_object = __i915_gem_ttm_object_init,
+};
+
+static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
+ u64 *start, u32 *size)
+{
+ if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
+ return false;
+
+ *start = 0;
+ *size = SZ_1M;
+
+ drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
+ *start, *start + *size);
+
+ return true;
+}
+
+static int reserve_lowmem_region(struct intel_uncore *uncore,
+ struct intel_memory_region *mem)
+{
+ u64 reserve_start;
+ u32 reserve_size;
+ int ret;
+
+ if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
+ return 0;
+
+ ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
+ if (ret)
+ drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
+
+ return ret;
+}
+
+static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_memory_region *mem;
+ resource_size_t min_page_size;
+ resource_size_t io_start;
+ resource_size_t io_size;
+ resource_size_t lmem_size;
+ int err;
+
+ if (!IS_DGFX(i915))
+ return ERR_PTR(-ENODEV);
+
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
+ if (HAS_FLAT_CCS(i915)) {
+ resource_size_t lmem_range;
+ u64 tile_stolen, flat_ccs_base;
+
+ lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
+ lmem_size *= SZ_1G;
+
+ flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K;
+
+ if (GEM_WARN_ON(lmem_size < flat_ccs_base))
+ return ERR_PTR(-EIO);
+
+ tile_stolen = lmem_size - flat_ccs_base;
+
+ /* If the FLAT_CCS_BASE_ADDR register is not populated, flag an error */
+ if (tile_stolen == lmem_size)
+ drm_err(&i915->drm,
+ "CCS_BASE_ADDR register did not have expected value\n");
+
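+		/*
+		 * Everything from the flat CCS base address up to the top of
+		 * lmem is not usable as general local memory, so trim it off.
+		 */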
+ lmem_size -= tile_stolen;
+ } else {
+ /* Stolen starts from GSMBASE without CCS */
+ lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
+ }
+
+ i915_resize_lmem_bar(i915, lmem_size);
+
+ if (i915->params.lmem_size > 0) {
+ lmem_size = min_t(resource_size_t, lmem_size,
+ mul_u32_u32(i915->params.lmem_size, SZ_1M));
+ }
+
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
+ io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
+ if (!io_size)
+ return ERR_PTR(-EIO);
+
+ min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+ I915_GTT_PAGE_SIZE_4K;
+ mem = intel_memory_region_create(i915,
+ 0,
+ lmem_size,
+ min_page_size,
+ io_start,
+ io_size,
+ INTEL_MEMORY_LOCAL,
+ 0,
+ &intel_region_lmem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ err = reserve_lowmem_region(uncore, mem);
+ if (err)
+ goto err_region_put;
+
+ drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
+ drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
+ &mem->io_start);
+ drm_info(&i915->drm, "Local memory IO size: %pa\n",
+ &mem->io_size);
+ drm_info(&i915->drm, "Local memory available: %pa\n",
+ &lmem_size);
+
+ if (io_size < lmem_size)
+ drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
+ (u64)io_size >> 20);
+
+ return mem;
+
+err_region_put:
+ intel_memory_region_destroy(mem);
+ return ERR_PTR(err);
+}
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
+{
+ return setup_lmem(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
new file mode 100644
index 000000000..1438576b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+struct intel_gt;
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt);
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
new file mode 100644
index 000000000..5121e6dc2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include "gem/i915_gem_internal.h"
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(const struct intel_engine_cs *engine)
+{
+ if (engine->class != RENDER_CLASS)
+ return NULL;
+
+ switch (GRAPHICS_VER(engine->i915)) {
+ case 6:
+ return &gen6_null_state;
+ case 7:
+ return &gen7_null_state;
+ case 8:
+ return &gen8_null_state;
+ case 9:
+ return &gen9_null_state;
+ }
+
+ return NULL;
+}
+
+/*
+ * Macro to add commands to the auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient as the null state generator makes the final batch
+ * with two passes to build command and state separately. At this point
+ * the size of both is known and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val) \
+ do { \
+ if ((i) >= PAGE_SIZE / sizeof(u32)) \
+ goto out; \
+ (batch)[(i)++] = (val); \
+ } while (0)
+
+static int render_state_setup(struct intel_renderstate *so,
+ struct drm_i915_private *i915)
+{
+ const struct intel_renderstate_rodata *rodata = so->rodata;
+ unsigned int i = 0, reloc_index = 0;
+ int ret = -EINVAL;
+ u32 *d;
+
+ d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ while (i < rodata->batch_items) {
+ u32 s = rodata->batch[i];
+
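+		/*
+		 * Each reloc entry is the byte offset of a batch dword to which
+		 * the vma's GGTT address must be added; with 64-bit relocations
+		 * the result spans two dwords, and the following dword must be
+		 * zero in the rodata.
+		 */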
+ if (i * 4 == rodata->reloc[reloc_index]) {
+ u64 r = s + so->vma->node.start;
+
+ s = lower_32_bits(r);
+ if (HAS_64BIT_RELOC(i915)) {
+ if (i + 1 >= rodata->batch_items ||
+ rodata->batch[i + 1] != 0)
+ goto out;
+
+ d[i++] = s;
+ s = upper_32_bits(r);
+ }
+
+ reloc_index++;
+ }
+
+ d[i++] = s;
+ }
+
+ if (rodata->reloc[reloc_index] != -1) {
+ drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
+ goto out;
+ }
+
+ so->batch_offset = i915_ggtt_offset(so->vma);
+ so->batch_size = rodata->batch_items * sizeof(u32);
+
+ while (i % CACHELINE_DWORDS)
+ OUT_BATCH(d, i, MI_NOOP);
+
+ so->aux_offset = i * sizeof(u32);
+
+ if (HAS_POOLED_EU(i915)) {
+ /*
+ * We always program 3x6 pool config but depending upon which
+ * subslice is disabled HW drops down to appropriate config
+ * shown below.
+ *
+ * In the below table 2x6 config always refers to
+ * fused-down version, native 2x6 is not available and can
+ * be ignored
+ *
+ * SNo subslices config eu pool configuration
+ * -----------------------------------------------------------
+ * 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
+ * 2 ss0 disabled (2x6) - 0x00777000 (3+9)
+ * 3 ss1 disabled (2x6) - 0x00770000 (6+6)
+ * 4 ss2 disabled (2x6) - 0x00007000 (9+3)
+ */
+ u32 eu_pool_config = 0x00777000;
+
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
+ OUT_BATCH(d, i, eu_pool_config);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ }
+
+ OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+ so->aux_size = i * sizeof(u32) - so->aux_offset;
+ so->aux_offset += so->batch_offset;
+ /*
+ * Since we are sending length, we need to strictly conform to
+ * all requirements. For Gen2 this must be a multiple of 8.
+ */
+ so->aux_size = ALIGN(so->aux_size, 8);
+
+ ret = 0;
+out:
+ __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
+ __i915_gem_object_release_map(so->vma->obj);
+ return ret;
+}
+
+#undef OUT_BATCH
+
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct drm_i915_gem_object *obj = NULL;
+ int err;
+
+ memset(so, 0, sizeof(*so));
+
+ so->rodata = render_state_get_rodata(engine);
+ if (so->rodata) {
+ if (so->rodata->batch_items * 4 > PAGE_SIZE)
+ return -EINVAL;
+
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(so->vma)) {
+ err = PTR_ERR(so->vma);
+ goto err_obj;
+ }
+ }
+
+ i915_gem_ww_ctx_init(&so->ww, true);
+retry:
+ err = intel_context_pin_ww(ce, &so->ww);
+ if (err)
+ goto err_fini;
+
+ /* return early if there's nothing to setup */
+ if (!err && !so->rodata)
+ return 0;
+
+ err = i915_gem_object_lock(so->vma->obj, &so->ww);
+ if (err)
+ goto err_context;
+
+ err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_context;
+
+ err = render_state_setup(so, engine->i915);
+ if (err)
+ goto err_unpin;
+
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(so->vma);
+err_context:
+ intel_context_unpin(ce);
+err_fini:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&so->ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&so->ww);
+err_obj:
+ if (obj)
+ i915_gem_object_put(obj);
+ so->vma = NULL;
+ return err;
+}
+
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int err;
+
+ if (!so->vma)
+ return 0;
+
+ err = i915_request_await_object(rq, so->vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ if (err)
+ return err;
+
+ err = engine->emit_bb_start(rq,
+ so->batch_offset, so->batch_size,
+ I915_DISPATCH_SECURE);
+ if (err)
+ return err;
+
+ if (so->aux_size > 8) {
+ err = engine->emit_bb_start(rq,
+ so->aux_offset, so->aux_size,
+ I915_DISPATCH_SECURE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void intel_renderstate_fini(struct intel_renderstate *so,
+ struct intel_context *ce)
+{
+ if (so->vma) {
+ i915_vma_unpin(so->vma);
+ i915_vma_close(so->vma);
+ }
+
+ intel_context_unpin(ce);
+ i915_gem_ww_ctx_fini(&so->ww);
+
+ if (so->vma)
+ i915_gem_object_put(so->vma->obj);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
new file mode 100644
index 000000000..4da4c5234
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#ifndef _INTEL_RENDERSTATE_H_
+#define _INTEL_RENDERSTATE_H_
+
+#include <linux/types.h>
+#include "i915_gem.h"
+#include "i915_gem_ww.h"
+
+struct i915_request;
+struct intel_context;
+struct i915_vma;
+
+struct intel_renderstate_rodata {
+ const u32 *reloc;
+ const u32 *batch;
+ const u32 batch_items;
+};
+
+#define RO_RENDERSTATE(_g) \
+ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+ .reloc = gen ## _g ## _null_state_relocs, \
+ .batch = gen ## _g ## _null_state_batch, \
+ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+ }
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
+struct intel_renderstate {
+ struct i915_gem_ww_ctx ww;
+ const struct intel_renderstate_rodata *rodata;
+ struct i915_vma *vma;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
+};
+
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_context *ce);
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq);
+void intel_renderstate_fini(struct intel_renderstate *so,
+ struct intel_context *ce);
+
+#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
new file mode 100644
index 000000000..10b930eaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -0,0 +1,1557 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
+#include <linux/string_helpers.h>
+
+#include "display/intel_display.h"
+#include "display/intel_overlay.h"
+
+#include "gem/i915_gem_context.h"
+
+#include "gt/intel_gt_regs.h"
+
+#include "i915_drv.h"
+#include "i915_file_private.h"
+#include "i915_gpu_error.h"
+#include "i915_irq.h"
+#include "intel_breadcrumbs.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pci_config.h"
+#include "intel_reset.h"
+
+#include "uc/intel_guc.h"
+
+#define RESET_MAX_RETRIES 3
+
+/* XXX How to handle concurrent GGTT updates using tiling registers? */
+#define RESET_UNDER_STOP_MACHINE 0
+
+static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw_fw(uncore, reg, 0, set);
+}
+
+static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw_fw(uncore, reg, clr, 0);
+}
+
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
+{
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
+ unsigned long prev_hang;
+ unsigned int score;
+
+ if (IS_ERR_OR_NULL(file_priv))
+ return;
+
+ score = 0;
+ if (banned)
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ drm_dbg(&ctx->i915->drm,
+ "client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
+static bool mark_guilty(struct i915_request *rq)
+{
+ struct i915_gem_context *ctx;
+ unsigned long prev_hang;
+ bool banned;
+ int i;
+
+ if (intel_context_is_closed(rq->context))
+ return true;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return intel_context_is_banned(rq->context);
+
+ atomic_inc(&ctx->guilty_count);
+
+ /* Cool contexts are too cool to be banned! (Used for reset testing.) */
+ if (!i915_gem_context_is_bannable(ctx)) {
+ banned = false;
+ goto out;
+ }
+
+ drm_notice(&ctx->i915->drm,
+ "%s context reset due to GPU hang\n",
+ ctx->name);
+
+ /* Record the timestamp for the last N hangs */
+ prev_hang = ctx->hang_timestamp[0];
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
+ ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
+ ctx->hang_timestamp[i] = jiffies;
+
+ /* If we have hung N+1 times in rapid succession, we ban the context! */
+ banned = !i915_gem_context_is_recoverable(ctx);
+ if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
+ banned = true;
+ if (banned)
+ drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
+
+ client_mark_guilty(ctx, banned);
+
+out:
+ i915_gem_context_put(ctx);
+ return banned;
+}
+
+static void mark_innocent(struct i915_request *rq)
+{
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ atomic_inc(&ctx->active_count);
+ rcu_read_unlock();
+}
+
+void __i915_request_reset(struct i915_request *rq, bool guilty)
+{
+ bool banned = false;
+
+ RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
+ GEM_BUG_ON(__i915_request_is_complete(rq));
+
+ rcu_read_lock(); /* protect the GEM context */
+ if (guilty) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ banned = mark_guilty(rq);
+ } else {
+ i915_request_set_error_once(rq, -EAGAIN);
+ mark_innocent(rq);
+ }
+ rcu_read_unlock();
+
+ if (banned)
+ intel_context_ban(rq->context, rq);
+}
+
+static bool i915_in_reset(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return gdrst & GRDOM_RESET_STATUS;
+}
+
+static int i915_do_reset(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
+ int err;
+
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(50);
+ err = wait_for_atomic(i915_in_reset(pdev), 50);
+
+ /* Clear the reset request. */
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+ udelay(50);
+ if (!err)
+ err = wait_for_atomic(!i915_in_reset(pdev), 50);
+
+ return err;
+}
+
+static bool g4x_reset_complete(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int g33_do_reset(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
+
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for_atomic(g4x_reset_complete(pdev), 50);
+}
+
+static int g4x_do_reset(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ if (ret) {
+ GT_TRACE(gt, "Wait for media reset failed\n");
+ goto out;
+ }
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ if (ret) {
+ GT_TRACE(gt, "Wait for render reset failed\n");
+ goto out;
+ }
+
+out:
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+
+ rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
+
+ return ret;
+}
+
+static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RESET_ENABLE, 0,
+ 5000, 0,
+ NULL);
+ if (ret) {
+ GT_TRACE(gt, "Wait for render reset failed\n");
+ goto out;
+ }
+
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RESET_ENABLE, 0,
+ 5000, 0,
+ NULL);
+ if (ret) {
+ GT_TRACE(gt, "Wait for media reset failed\n");
+ goto out;
+ }
+
+out:
+ intel_uncore_write_fw(uncore, ILK_GDSR, 0);
+ intel_uncore_posting_read_fw(uncore, ILK_GDSR);
+ return ret;
+}
+
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ int loops = 2;
+ int err;
+
+ /*
+ * GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ do {
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+
+ /*
+ * Wait for the device to ack the reset requests.
+ *
+ * On some platforms, e.g. Jasperlake, we see that the
+ * engine register state is not cleared until shortly after
+ * GDRST reports completion, causing a failure as we try
+ * to immediately resume while the internal state is still
+ * in flux. If we immediately repeat the reset, the second
+ * reset appears to serialise with the first, and since
+ * it is a no-op, the registers should retain their reset
+ * value. However, there is still a concern that upon
+ * leaving the second reset, the internal engine state
+ * is still in flux and not ready for resuming.
+ */
+ err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+ hw_domain_mask, 0,
+ 2000, 0,
+ NULL);
+ } while (err == 0 && --loops);
+ if (err)
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
+
+ /*
+ * As we have observed that the engine state is still volatile
+ * after GDRST is acked, impose a small delay to let everything settle.
+ */
+ udelay(50);
+
+ return err;
+}
+
+static int __gen6_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ u32 hw_mask;
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN6_GRDOM_FULL;
+ } else {
+ intel_engine_mask_t tmp;
+
+ hw_mask = 0;
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
+ hw_mask |= engine->reset_domain;
+ }
+ }
+
+ return gen6_hw_domain_reset(gt, hw_mask);
+}
+
+static int gen6_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gt->uncore->lock, flags);
+ ret = __gen6_reset_engines(gt, engine_mask, retry);
+ spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
+ return ret;
+}
+
+static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
+{
+ int vecs_id;
+
+ GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
+
+ vecs_id = _VECS((engine->instance) / 2);
+
+ return engine->gt->engine[vecs_id];
+}
+
+struct sfc_lock_data {
+ i915_reg_t lock_reg;
+ i915_reg_t ack_reg;
+ i915_reg_t usage_reg;
+ u32 lock_bit;
+ u32 ack_bit;
+ u32 usage_bit;
+ u32 reset_bit;
+};
+
+static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
+ struct sfc_lock_data *sfc_lock)
+{
+ switch (engine->class) {
+ default:
+ MISSING_CASE(engine->class);
+ fallthrough;
+ case VIDEO_DECODE_CLASS:
+ sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
+ sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
+ sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
+ sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
+ sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
+ sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
+ sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ }
+}
+
+static int gen11_lock_sfc(struct intel_engine_cs *engine,
+ u32 *reset_mask,
+ u32 *unlock_mask)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
+ struct sfc_lock_data sfc_lock;
+ bool lock_obtained, lock_to_other = false;
+ int ret;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return 0;
+
+ fallthrough;
+ case VIDEO_ENHANCEMENT_CLASS:
+ get_sfc_forced_lock_data(engine, &sfc_lock);
+
+ break;
+ default:
+ return 0;
+ }
+
+ if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
+ struct intel_engine_cs *paired_vecs;
+
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ GRAPHICS_VER(engine->i915) != 12)
+ return 0;
+
+ /*
+ * Wa_14010733141
+ *
+ * If the VCS-MFX isn't using the SFC, we also need to check
+ * whether VCS-HCP is using it. If so, we need to issue a *VE*
+ * forced lock on the VE engine that shares the same SFC.
+ */
+ if (!(intel_uncore_read_fw(uncore,
+ GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
+ GEN12_HCP_SFC_USAGE_BIT))
+ return 0;
+
+ paired_vecs = find_sfc_paired_vecs_engine(engine);
+ get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
+ lock_to_other = true;
+ *unlock_mask |= paired_vecs->mask;
+ } else {
+ *unlock_mask |= engine->mask;
+ }
+
+ /*
+ * If the engine is using an SFC, tell the engine that a software reset
+ * is going to happen. The engine will then try to force lock the SFC.
+ * If SFC ends up being locked to the engine we want to reset, we have
+ * to reset it as well (we will unlock it once the reset sequence is
+ * completed).
+ */
+ rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+
+ ret = __intel_wait_for_register_fw(uncore,
+ sfc_lock.ack_reg,
+ sfc_lock.ack_bit,
+ sfc_lock.ack_bit,
+ 1000, 0, NULL);
+
+ /*
+ * Was the SFC released while we were trying to lock it?
+ *
+ * We should reset both the engine and the SFC if:
+ * - We were locking the SFC to this engine and the lock succeeded
+ * OR
+ * - We were locking the SFC to a different engine (Wa_14010733141)
+ * but the SFC was released before the lock was obtained.
+ *
+ * Otherwise we need only reset the engine by itself and we can
+ * leave the SFC alone.
+ */
+ lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
+ sfc_lock.usage_bit) != 0;
+ if (lock_obtained == lock_to_other)
+ return 0;
+
+ if (ret) {
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
+ return ret;
+ }
+
+ *reset_mask |= sfc_lock.reset_bit;
+ return 0;
+}
+
+static void gen11_unlock_sfc(struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
+ struct sfc_lock_data sfc_lock = {};
+
+ if (engine->class != VIDEO_DECODE_CLASS &&
+ engine->class != VIDEO_ENHANCEMENT_CLASS)
+ return;
+
+ if (engine->class == VIDEO_DECODE_CLASS &&
+ (BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return;
+
+ get_sfc_forced_lock_data(engine, &sfc_lock);
+
+ rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+}
+
+static int __gen11_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp;
+ u32 reset_mask, unlock_mask = 0;
+ int ret;
+
+ if (engine_mask == ALL_ENGINES) {
+ reset_mask = GEN11_GRDOM_FULL;
+ } else {
+ reset_mask = 0;
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
+ reset_mask |= engine->reset_domain;
+ ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
+ if (ret)
+ goto sfc_unlock;
+ }
+ }
+
+ ret = gen6_hw_domain_reset(gt, reset_mask);
+
+sfc_unlock:
+ /*
+ * We unlock the SFC based on the lock status and not the result of
+ * gen11_lock_sfc to make sure that we clean properly if something
+ * wrong happened during the lock (e.g. lock acquired after timeout
+ * expiration).
+ *
+ * Due to Wa_14010733141, we may have locked an SFC to an engine that
+ * wasn't being reset. So instead of calling gen11_unlock_sfc()
+ * on engine_mask, we instead call it on the mask of engines that our
+ * gen11_lock_sfc() calls told us actually had locks attempted.
+ */
+ for_each_engine_masked(engine, gt, unlock_mask, tmp)
+ gen11_unlock_sfc(engine);
+
+ return ret;
+}
+
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
+ u32 request, mask, ack;
+ int ret;
+
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
+ ack = intel_uncore_read_fw(uncore, reg);
+ if (ack & RESET_CTL_CAT_ERROR) {
+ /*
+ * For catastrophic errors, ready-for-reset sequence
+ * needs to be bypassed: HAS#396813
+ */
+ request = RESET_CTL_CAT_ERROR;
+ mask = RESET_CTL_CAT_ERROR;
+
+ /* Catastrophic errors need to be cleared by HW */
+ ack = 0;
+ } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
+ request = RESET_CTL_REQUEST_RESET;
+ mask = RESET_CTL_READY_TO_RESET;
+ ack = RESET_CTL_READY_TO_RESET;
+ } else {
+ return 0;
+ }
+
+ intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
+ 700, 0, NULL);
+ if (ret)
+ drm_err(&engine->i915->drm,
+ "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
+
+ return ret;
+}
+
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
+{
+ intel_uncore_write_fw(engine->uncore,
+ RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ const bool reset_non_ready = retry >= 1;
+ intel_engine_mask_t tmp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gt->uncore->lock, flags);
+
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
+ ret = gen8_engine_reset_prepare(engine);
+ if (ret && !reset_non_ready)
+ goto skip_reset;
+
+ /*
+ * If this is not the first failed attempt to prepare,
+ * we decide to proceed anyway.
+ *
+ * By doing so we risk context corruption and with
+ * some gens (kbl), possible system hang if reset
+ * happens during active bb execution.
+ *
+		 * We would rather take context corruption than a failed
+		 * reset with a wedged driver/gpu. The active bb execution
+		 * case should be covered by the stop_engines() we have
+		 * before the reset.
+ */
+ }
+
+ /*
+ * Wa_22011100796:dg2, whenever a full soft reset is required,
+ * reset all individual engines first, and then do a full soft reset.
+ *
+ * This is best effort, so ignore any error from the initial reset.
+ */
+ if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
+ __gen11_reset_engines(gt, gt->info.engine_mask, 0);
+
+ if (GRAPHICS_VER(gt->i915) >= 11)
+ ret = __gen11_reset_engines(gt, engine_mask, retry);
+ else
+ ret = __gen6_reset_engines(gt, engine_mask, retry);
+
+skip_reset:
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
+ gen8_engine_reset_cancel(engine);
+
+ spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
+ return ret;
+}
+
+static int mock_reset(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ unsigned int retry)
+{
+ return 0;
+}
+
+typedef int (*reset_func)(struct intel_gt *,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry);
+
+static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (is_mock_gt(gt))
+ return mock_reset;
+ else if (GRAPHICS_VER(i915) >= 8)
+ return gen8_reset_engines;
+ else if (GRAPHICS_VER(i915) >= 6)
+ return gen6_reset_engines;
+ else if (GRAPHICS_VER(i915) >= 5)
+ return ilk_do_reset;
+ else if (IS_G4X(i915))
+ return g4x_do_reset;
+ else if (IS_G33(i915) || IS_PINEVIEW(i915))
+ return g33_do_reset;
+ else if (GRAPHICS_VER(i915) >= 3)
+ return i915_do_reset;
+ else
+ return NULL;
+}
+
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+{
+ const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
+ reset_func reset;
+ int ret = -ETIMEDOUT;
+ int retry;
+
+ reset = intel_get_gpu_reset(gt);
+ if (!reset)
+ return -ENODEV;
+
+ /*
+ * If the power well sleeps during the reset, the reset
+ * request may be dropped and never complete (causing -EIO).
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
+ GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
+ preempt_disable();
+ ret = reset(gt, engine_mask, retry);
+ preempt_enable();
+ }
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+bool intel_has_gpu_reset(const struct intel_gt *gt)
+{
+ if (!gt->i915->params.reset)
+ return false;
+
+ return intel_get_gpu_reset(gt) != NULL;
+}
+
+bool intel_has_reset_engine(const struct intel_gt *gt)
+{
+ if (gt->i915->params.reset < 2)
+ return false;
+
+ return INTEL_INFO(gt->i915)->has_reset_engine;
+}
+
+int intel_reset_guc(struct intel_gt *gt)
+{
+ u32 guc_domain =
+ GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ int ret;
+
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(gt, guc_domain);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/*
+ * Ensure the irq handler finishes, and is not run again.
+ * Also return the active request so that we only search for it once.
+ */
+static void reset_prepare_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * During the reset sequence, we must prevent the engine from
+ * entering RC6. As the context state is undefined until we restart
+ * the engine, if it does enter RC6 during the reset, the state
+ * written to the powercontext is undefined and so we may lose
+ * GPU state upon resume, i.e. fail to restart after a reset.
+ */
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
+}
+
+static void revoke_mmaps(struct intel_gt *gt)
+{
+ int i;
+
+ for (i = 0; i < gt->ggtt->num_fences; i++) {
+ struct drm_vma_offset_node *node;
+ struct i915_vma *vma;
+ u64 vma_offset;
+
+ vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
+ if (!vma)
+ continue;
+
+ if (!i915_vma_has_userfault(vma))
+ continue;
+
+ GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
+
+ if (!vma->mmo)
+ continue;
+
+ node = &vma->mmo->vma_node;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
+
+ unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+ }
+}
+
+static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t awake = 0;
+ enum intel_engine_id id;
+
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_pm_get_if_awake(engine))
+ awake |= engine->mask;
+ reset_prepare_engine(engine);
+ }
+
+ return awake;
+}
+
+static void gt_revoke(struct intel_gt *gt)
+{
+ revoke_mmaps(gt);
+}
+
+static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there.
+ */
+ err = i915_ggtt_enable_hw(gt->i915);
+ if (err)
+ return err;
+
+ local_bh_disable();
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
+
+ intel_uc_reset(&gt->uc, ALL_ENGINES);
+
+ intel_ggtt_restore_fences(gt->ggtt);
+
+ return err;
+}
+
+static void reset_finish_engine(struct intel_engine_cs *engine)
+{
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_signal_breadcrumbs(engine);
+}
+
+static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ reset_finish_engine(engine);
+ if (awake & engine->mask)
+ intel_engine_pm_put(engine);
+ }
+
+ intel_uc_reset_finish(&gt->uc);
+}
+
+static void nop_submit_request(struct i915_request *request)
+{
+ RQ_TRACE(request, "-EIO\n");
+
+ request = i915_request_mark_eio(request);
+ if (request) {
+ i915_request_submit(request);
+ intel_engine_signal_breadcrumbs(request->engine);
+
+ i915_request_put(request);
+ }
+}
+
+static void __intel_gt_set_wedged(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t awake;
+ enum intel_engine_id id;
+
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
+ return;
+
+ GT_TRACE(gt, "start\n");
+
+ /*
+ * First, stop submission to hw, but do not yet complete requests by
+ * rolling the global seqno forward (since this would complete requests
+ * for which we haven't set the fence error to EIO yet).
+ */
+ awake = reset_prepare(gt);
+
+ /* Even if the GPU reset fails, it should still stop the engines */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
+
+ for_each_engine(engine, gt, id)
+ engine->submit_request = nop_submit_request;
+
+ /*
+ * Make sure no request can slip through without getting completed by
+ * either this call here to intel_engine_write_global_seqno, or the one
+ * in nop_submit_request.
+ */
+ synchronize_rcu_expedited();
+ set_bit(I915_WEDGED, &gt->reset.flags);
+
+ /* Mark all executing requests as skipped */
+ local_bh_disable();
+ for_each_engine(engine, gt, id)
+ if (engine->reset.cancel)
+ engine->reset.cancel(engine);
+ intel_uc_cancel_requests(&gt->uc);
+ local_bh_enable();
+
+ reset_finish(gt, awake);
+
+ GT_TRACE(gt, "end\n");
+}
+
+void intel_gt_set_wedged(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
+ return;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ mutex_lock(&gt->reset.mutex);
+
+ if (GEM_SHOW_DEBUG()) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
+ __intel_gt_set_wedged(gt);
+
+ mutex_unlock(&gt->reset.mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+static bool __intel_gt_unset_wedged(struct intel_gt *gt)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl;
+ bool ok;
+
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
+ return true;
+
+ /* Never fully initialised, recovery impossible */
+ if (intel_gt_has_unrecoverable_error(gt))
+ return false;
+
+ GT_TRACE(gt, "start\n");
+
+ /*
+ * Before unwedging, make sure that all pending operations
+ * are flushed and errored out - we may have requests waiting upon
+ * third party fences. We marked all inflight requests as EIO, and
+ * every execbuf since has returned EIO; for consistency we want all
+ * the currently pending requests to also be marked as EIO, which
+ * is done inside our nop_submit_request - and so we must wait.
+ *
+ * No more can be submitted until we reset the wedged bit.
+ */
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
+ continue;
+
+ spin_unlock(&timelines->lock);
+
+ /*
+ * All internal dependencies (i915_requests) will have
+ * been flushed by the set-wedge, but we may be stuck waiting
+ * for external fences. These should all be capped to 10s
+ * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
+ * in the worst case.
+ */
+ dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
+
+ /* Restart iteration after dropping the lock */
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
+ }
+ spin_unlock(&timelines->lock);
+
+ /* We must reset pending GPU events before restoring our submission */
+ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ if (!ok) {
+ /*
+ * Warn CI about the unrecoverable wedged condition.
+ * Time for a reboot.
+ */
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+ return false;
+ }
+
+ /*
+ * Undo nop_submit_request. We prevent all new i915 requests from
+ * being queued (by disallowing execbuf whilst wedged) so having
+ * waited for all active requests above, we know the system is idle
+ * and do not have to worry about a thread being inside
+ * engine->submit_request() as we swap over. So unlike installing
+ * the nop_submit_request on reset, we can do this from normal
+ * context and do not require stop_machine().
+ */
+ intel_engines_reset_default_submission(gt);
+
+ GT_TRACE(gt, "end\n");
+
+ smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+ clear_bit(I915_WEDGED, &gt->reset.flags);
+
+ return true;
+}
+
+bool intel_gt_unset_wedged(struct intel_gt *gt)
+{
+ bool result;
+
+ mutex_lock(&gt->reset.mutex);
+ result = __intel_gt_unset_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
+
+ return result;
+}
+
+static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
+{
+ int err, i;
+
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+ for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
+ msleep(10 * (i + 1));
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+ }
+ if (err)
+ return err;
+
+ return gt_reset(gt, stalled_mask);
+}
+
+static int resume(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ for_each_engine(engine, gt, id) {
+ ret = intel_engine_resume(engine);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gt_reset - reset chip after a hang
+ * @gt: #intel_gt to reset
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user error message for why we are resetting
+ *
+ * Reset the chip. Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason)
+{
+ intel_engine_mask_t awake;
+ int ret;
+
+ GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
+
+ might_sleep();
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ /*
+ * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
+ * critical section like gpu reset.
+ */
+ gt_revoke(gt);
+
+ mutex_lock(&gt->reset.mutex);
+
+ /* Clear any previous failed attempts at recovery. Time to try again. */
+ if (!__intel_gt_unset_wedged(gt))
+ goto unlock;
+
+ if (reason)
+ drm_notice(&gt->i915->drm,
+ "Resetting chip for %s\n", reason);
+ atomic_inc(&gt->i915->gpu_error.reset_count);
+
+ awake = reset_prepare(gt);
+
+ if (!intel_has_gpu_reset(gt)) {
+ if (gt->i915->params.reset)
+ drm_err(&gt->i915->drm, "GPU reset not supported\n");
+ else
+ drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
+ goto error;
+ }
+
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(gt->i915);
+
+ if (do_reset(gt, stalled_mask)) {
+ drm_err(&gt->i915->drm, "Failed to reset chip\n");
+ goto taint;
+ }
+
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(gt->i915);
+
+ intel_overlay_reset(gt->i915);
+
+ /*
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ ret = intel_gt_init_hw(gt);
+ if (ret) {
+ drm_err(&gt->i915->drm,
+ "Failed to initialise HW following reset (%d)\n",
+ ret);
+ goto taint;
+ }
+
+ ret = resume(gt);
+ if (ret)
+ goto taint;
+
+finish:
+ reset_finish(gt, awake);
+unlock:
+ mutex_unlock(&gt->reset.mutex);
+ return;
+
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+error:
+ __intel_gt_set_wedged(gt);
+ goto finish;
+}
+
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
+{
+ return __intel_gt_reset(engine->gt, engine->mask);
+}
+
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
+{
+ struct intel_gt *gt = engine->gt;
+ int ret;
+
+ ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
+
+ if (intel_engine_uses_guc(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ reset_prepare_engine(engine);
+
+ if (msg)
+ drm_notice(&engine->i915->drm,
+ "Resetting %s for %s\n", engine->name, msg);
+ atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+
+ ret = intel_gt_reset_engine(engine);
+ if (ret) {
+ /* If we fail here, we expect to fallback to a global reset */
+ ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on elsp; we know the
+ * active request and can drop it, then adjust the head to skip the
+ * offending request and resume executing the remaining requests in
+ * the queue.
+ */
+ __intel_engine_reset(engine, true);
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = intel_engine_resume(engine);
+
+out:
+ intel_engine_cancel_stop_cs(engine);
+ reset_finish_engine(engine);
+ intel_engine_pm_put_async(engine);
+ return ret;
+}
+
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
+static void intel_gt_reset_global(struct intel_gt *gt,
+ u32 engine_mask,
+ const char *reason)
+{
+ struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ struct intel_wedge_me w;
+
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
+
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
+ /* Use a watchdog to ensure that our reset completes */
+ intel_wedge_on_timeout(&w, gt, 5 * HZ) {
+ intel_display_prepare_reset(gt->i915);
+
+ intel_gt_reset(gt, engine_mask, reason);
+
+ intel_display_finish_reset(gt->i915);
+ }
+
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+}
+
+/**
+ * intel_gt_handle_error - handle a gpu error
+ * @gt: the intel_gt
+ * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
+ * @fmt: Error message format string
+ *
+ * Do some basic checking of register state at error time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
+{
+ struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ intel_engine_mask_t tmp;
+ char error_msg[80];
+ char *msg = NULL;
+
+ if (fmt) {
+ va_list args;
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ msg = error_msg;
+ }
+
+ /*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
+ */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ engine_mask &= gt->info.engine_mask;
+
+ if (flags & I915_ERROR_CAPTURE) {
+ i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
+ intel_gt_clear_error_registers(gt, engine_mask);
+ }
+
+ /*
+ * Try engine reset when available. We fall back to full reset if
+ * single reset fails.
+ */
+ if (!intel_uc_uses_guc_submission(&gt->uc) &&
+ intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
+ BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags))
+ continue;
+
+ if (__intel_engine_reset_bh(engine, msg) == 0)
+ engine_mask &= ~engine->mask;
+
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+ local_bh_enable();
+ }
+
+ if (!engine_mask)
+ goto out;
+
+ /* Full reset needs the mutex, stop any other user trying to do so. */
+ if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+ goto out; /* piggy-back on the other reset */
+ }
+
+ /* Make sure intel_gt_reset_trylock() sees the I915_RESET_BACKOFF */
+ synchronize_rcu_expedited();
+
+ /*
+ * Prevent any other reset-engine attempt. We don't do this for GuC
+ * submission, as the GuC owns the per-engine reset, not the i915.
+ */
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
+ }
+
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
+
+ intel_gt_reset_global(gt, engine_mask, msg);
+
+ if (!intel_uc_uses_guc_submission(&gt->uc)) {
+ for_each_engine(engine, gt, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+ clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_all(&gt->reset.queue);
+
+out:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
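+/*
+ * Take the reset backoff SRCU read lock, waiting first for any pending
+ * global reset (I915_RESET_BACKOFF) to finish; the matching
+ * intel_gt_reset_unlock() drops it again. This is what allows
+ * intel_gt_handle_error() above to flush all lock holders with
+ * synchronize_srcu_expedited() before clobbering the GPU.
+ */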
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
+{
+ might_lock(&gt->reset.backoff_srcu);
+ might_sleep();
+
+ rcu_read_lock();
+ while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ rcu_read_unlock();
+
+ if (wait_event_interruptible(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &gt->reset.flags)))
+ return -EINTR;
+
+ rcu_read_lock();
+ }
+ *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
+__releases(&gt->reset.backoff_srcu)
+{
+ srcu_read_unlock(&gt->reset.backoff_srcu, tag);
+}
+
+int intel_gt_terminally_wedged(struct intel_gt *gt)
+{
+ might_sleep();
+
+ if (!intel_gt_is_wedged(gt))
+ return 0;
+
+ if (intel_gt_has_unrecoverable_error(gt))
+ return -EIO;
+
+ /* Reset still in progress? Maybe we will recover? */
+ if (wait_event_interruptible(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &gt->reset.flags)))
+ return -EINTR;
+
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
+}
+
+void intel_gt_set_wedged_on_init(struct intel_gt *gt)
+{
+ BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
+ I915_WEDGED_ON_INIT);
+ intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
+ set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+
+ /* Wedged on init is non-recoverable */
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+}
+
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
+{
+ intel_gt_set_wedged(gt);
+ i915_disable_error_state(gt->i915, -ENODEV);
+ set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
+ intel_gt_retire_requests(gt); /* cleanup any wedged requests */
+}
+
+void intel_gt_init_reset(struct intel_gt *gt)
+{
+ init_waitqueue_head(&gt->reset.queue);
+ mutex_init(&gt->reset.mutex);
+ init_srcu_struct(&gt->reset.backoff_srcu);
+
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
+ /* no GPU until we are ready! */
+ __set_bit(I915_WEDGED, &gt->reset.flags);
+}
+
+void intel_gt_fini_reset(struct intel_gt *gt)
+{
+ cleanup_srcu_struct(&gt->reset.backoff_srcu);
+}
+
+static void intel_wedge_me(struct work_struct *work)
+{
+ struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ drm_err(&w->gt->i915->drm,
+ "%s timed out, cancelling all in-flight rendering.\n",
+ w->name);
+ intel_gt_set_wedged(w->gt);
+}
+
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name)
+{
+ w->gt = gt;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+void __intel_fini_wedge(struct intel_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->gt = NULL;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_reset.c"
+#include "selftest_hangcheck.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
new file mode 100644
index 000000000..adc734e67
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef I915_RESET_H
+#define I915_RESET_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/srcu.h>
+
+#include "intel_engine_types.h"
+#include "intel_reset_types.h"
+
+struct i915_request;
+struct intel_engine_cs;
+struct intel_gt;
+struct intel_guc;
+
+void intel_gt_init_reset(struct intel_gt *gt);
+void intel_gt_fini_reset(struct intel_gt *gt);
+
+__printf(4, 5)
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
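+
+/*
+ * A sketch of a typical call (hypothetical message; the ring submission
+ * backend has a real caller): report a hang on one engine and ask for an
+ * error capture:
+ *
+ * intel_gt_handle_error(engine->gt, engine->mask, I915_ERROR_CAPTURE,
+ * "hang on %s", engine->name);
+ */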
+
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason);
+int intel_engine_reset(struct intel_engine_cs *engine,
+ const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
+
+void __i915_request_reset(struct i915_request *rq, bool guilty);
+
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
+
+void intel_gt_set_wedged(struct intel_gt *gt);
+bool intel_gt_unset_wedged(struct intel_gt *gt);
+int intel_gt_terminally_wedged(struct intel_gt *gt);
+
+/*
+ * There's no unset_wedged_on_init paired with this one.
+ * Once we're wedged on init, there's no going back.
+ * Same thing for unset_wedged_on_fini.
+ */
+void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
+
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
+
+int intel_reset_guc(struct intel_gt *gt);
+
+struct intel_wedge_me {
+ struct delayed_work work;
+ struct intel_gt *gt;
+ const char *name;
+};
+
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name);
+void __intel_fini_wedge(struct intel_wedge_me *w);
+
+#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
+ for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
+ (W)->gt; \
+ __intel_fini_wedge((W)))
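+
+/*
+ * Example (mirroring the use in intel_gt_reset_global()): wedge the GT if
+ * the guarded block takes longer than the timeout; do_reset_work() is a
+ * hypothetical stand-in for the guarded code:
+ *
+ * struct intel_wedge_me w;
+ *
+ * intel_wedge_on_timeout(&w, gt, 5 * HZ)
+ * do_reset_work(gt);
+ */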
+
+bool intel_has_gpu_reset(const struct intel_gt *gt);
+bool intel_has_reset_engine(const struct intel_gt *gt);
+
+#endif /* I915_RESET_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
new file mode 100644
index 000000000..9312b29f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RESET_TYPES_H_
+#define __INTEL_RESET_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/srcu.h>
+
+struct intel_reset {
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
+ *
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ *
+ * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
+ * longer use the GPU - similar to the #I915_WEDGED bit. The difference
+ * is in the way we handle a "forced" unwedge (e.g. through debugfs),
+ * which is not allowed if we failed to initialize.
+ *
+ * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
+ * use it to mark that the GPU is no longer available (and prevent
+ * users from using it).
+ */
+ unsigned long flags;
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_MODESET 1
+#define I915_RESET_ENGINE 2
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
+#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
+#define I915_WEDGED (BITS_PER_LONG - 1)
+
+ struct mutex mutex; /* serialises wedging/unwedging */
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for the wedged state to settle.
+ */
+ wait_queue_head_t queue;
+
+ struct srcu_struct backoff_srcu;
+};
+
+#endif /* __INTEL_RESET_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
new file mode 100644
index 000000000..fb99143be
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_engine.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+unsigned int intel_ring_update_space(struct intel_ring *ring)
+{
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
+}
+
+void __intel_ring_pin(struct intel_ring *ring)
+{
+ GEM_BUG_ON(!atomic_read(&ring->pin_count));
+ atomic_inc(&ring->pin_count);
+}
+
+int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
+{
+ struct i915_vma *vma = ring->vma;
+ unsigned int flags;
+ void *addr;
+ int ret;
+
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ if (i915_gem_object_is_stolen(vma->obj))
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+
+ ret = i915_ggtt_pin(vma, ww, 0, flags);
+ if (unlikely(ret))
+ goto err_unpin;
+
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ } else {
+ int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+
+ addr = i915_gem_object_pin_map(vma->obj, type);
+ }
+
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_ring;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+
+ ring->vaddr = addr;
+ return 0;
+
+err_ring:
+ i915_vma_unpin(vma);
+err_unpin:
+ atomic_dec(&ring->pin_count);
+ return ret;
+}
+
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ tail = intel_ring_wrap(ring, tail);
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
+
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
+ i915_vma_unpin_iomap(vma);
+ else
+ i915_gem_object_unpin_map(vma->obj);
+
+ i915_vma_make_purgeable(vma);
+ i915_vma_unpin(vma);
+}
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_PM_VOLATILE);
+ if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ring->ref);
+ ring->size = size;
+ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
+
+ /*
+ * Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = size;
+ if (IS_I830(i915) || IS_I845G(i915))
+ ring->effective_size -= 2 * CACHELINE_BYTES;
+
+ intel_ring_update_space(ring);
+
+ vma = create_ring_vma(engine->gt->ggtt, size);
+ if (IS_ERR(vma)) {
+ kfree(ring);
+ return ERR_CAST(vma);
+ }
+ ring->vma = vma;
+
+ return ring;
+}
+
+void intel_ring_free(struct kref *ref)
+{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
+
+ i915_vma_put(ring->vma);
+ kfree(ring);
+}
+
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
+{
+ struct i915_request *target;
+ long timeout;
+
+ if (intel_ring_update_space(ring) >= bytes)
+ return 0;
+
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
+ break;
+ }
+
+ if (GEM_WARN_ON(&target->link == &tl->requests))
+ return -ENOSPC;
+
+ timeout = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
+
+ i915_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
+
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
+ total_bytes = bytes + rq->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
+
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
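+ /*
+ * The low bit only serves as a flag so that need_wrap
+ * is non-zero even when remain_actual happens to be 0;
+ * it is masked off again before the MI_NOOP fill below.
+ */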
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = rq->reserved_space + remain_actual;
+ }
+ }
+
+ if (unlikely(total_bytes > ring->space)) {
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring,
+ i915_request_timeline(rq),
+ total_bytes);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(need_wrap)) {
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
+
+ /* Fill the tail with MI_NOOP */
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
+ ring->space -= need_wrap;
+ ring->emit = 0;
+ }
+
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
+ ring->emit += bytes;
+ ring->space -= bytes;
+
+ return cs;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct i915_request *rq)
+{
+ int num_dwords;
+ void *cs;
+
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
+ if (num_dwords == 0)
+ return 0;
+
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
+ cs = intel_ring_begin(rq, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
+ intel_ring_advance(rq, cs + num_dwords);
+
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
new file mode 100644
index 000000000..1b32dadfb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_H
+#define INTEL_RING_H
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "i915_request.h"
+#include "intel_ring_types.h"
+
+struct intel_engine_cs;
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
+int intel_ring_cacheline_align(struct i915_request *rq);
+
+unsigned int intel_ring_update_space(struct intel_ring *ring);
+
+void __intel_ring_pin(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
+
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
+{
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+ GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
+}
+
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
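+/*
+ * Sign of the distance from @prev to @next in ring space: ring->wrap is
+ * BITS_PER_TYPE(ring->size) - ilog2(size) (see intel_engine_create_ring()),
+ * so the shift discards the bits above the ring size and moves the top
+ * ring-offset bit into the sign bit. The result is positive when @next is
+ * ahead of @prev and negative when behind, for offsets less than half a
+ * ring apart.
+ */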
+static inline int intel_ring_direction(const struct intel_ring *ring,
+ u32 next, u32 prev)
+{
+ typecheck(typeof(ring->size), next);
+ typecheck(typeof(ring->size), prev);
+ return (next - prev) << ring->wrap;
+}
+
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
+
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ u32 offset = addr - rq->ring->vaddr;
+
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ unsigned int head = READ_ONCE(ring->head);
+
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD,
+ * it may have advanced but in the worst case it is equally the same
+ * as ring->head and so we should never program RING_TAIL to advance
+ * into the same cacheline as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
+#undef cacheline
+}
+
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
+
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
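+ /*
+ * Worked example (made-up numbers): size = 4096, head = 512 and
+ * tail = 3584 give (512 - 3584 - 64) & 4095 = 960, i.e. the 1024
+ * bytes between tail and head minus one guard cacheline.
+ */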
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+#endif /* INTEL_RING_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
new file mode 100644
index 000000000..d5d6f1fad
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -0,0 +1,1422 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2008-2021 Intel Corporation
+ */
+
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "gen2_engine_cs.h"
+#include "gen6_engine_cs.h"
+#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
+#include "intel_engine_regs.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_regs.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "shmem_utils.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->class == RENDER_CLASS) {
+ if (GRAPHICS_VER(engine->i915) >= 6)
+ mask &= ~BIT(0);
+ else
+ mask &= ~I915_USER_INTERRUPT;
+ }
+
+ intel_engine_set_hwsp_writemask(engine, mask);
+}
+
+static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
+{
+ u32 addr;
+
+ addr = lower_32_bits(phys);
+ if (GRAPHICS_VER(engine->i915) >= 4)
+ addr |= (phys >> 28) & 0xf0;
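+ /*
+ * i.e. physical address bits [35:32] land in register bits
+ * [7:4], presumably the extended address field for placements
+ * above 4GiB on gen4+.
+ */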
+
+ intel_uncore_write(engine->uncore, HWS_PGA, addr);
+}
+
+static struct page *status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ return sg_page(obj->mm.pages->sgl);
+}
+
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+ set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
+ set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
+{
+ i915_reg_t hwsp;
+
+ /*
+ * The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (GRAPHICS_VER(engine->i915) == 7) {
+ switch (engine->id) {
+ /*
+ * No more rings exist on Gen7. Default case is only to shut up
+ * gcc switch check warning.
+ */
+ default:
+ GEM_BUG_ON(engine->id);
+ fallthrough;
+ case RCS0:
+ hwsp = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS0:
+ hwsp = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS0:
+ hwsp = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS0:
+ hwsp = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (GRAPHICS_VER(engine->i915) == 6) {
+ hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
+ } else {
+ hwsp = RING_HWS_PGA(engine->mmio_base);
+ }
+
+ intel_uncore_write_fw(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read_fw(engine->uncore, hwsp);
+}
+
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+ if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
+ return;
+
+ /* ring should be idle before issuing a sync flush */
+ if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
+ drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
+ engine->name);
+
+ ENGINE_WRITE_FW(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
+ 2000, 0, NULL))
+ ENGINE_TRACE(engine,
+ "wait for SyncFlush to complete for TLB invalidation timed out\n");
+}
+
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+ set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
+ set_hwstam(engine, ~0u);
+
+ flush_cs_tlb(engine);
+}
+
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static u32 pp_dir(struct i915_address_space *vm)
+{
+ return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
+}
+
+static void set_pp_dir(struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm = vm_alias(engine->gt->vm);
+
+ if (!vm)
+ return;
+
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+ if (GRAPHICS_VER(engine->i915) >= 7) {
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ }
+}
+
+static bool stop_ring(struct intel_engine_cs *engine)
+{
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+
+static int xcs_resume(struct intel_engine_cs *engine)
+{
+ struct intel_ring *ring = engine->legacy.ring;
+
+ ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+ ring->head, ring->tail);
+
+ /*
+ * Double check the ring is empty & disabled before we resume. Called
+ * from atomic context during PCI probe, so _hardirq().
+ */
+ intel_synchronize_hardirq(engine->i915);
+ if (!stop_ring(engine))
+ goto err;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ ring_setup_phys_status_page(engine);
+ else
+ ring_setup_status_page(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ /* Enforce ordering by reading HEAD register back */
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /*
+ * Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values.
+ */
+ ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
+
+ /* Check that the ring offsets point within the ring! */
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+ intel_ring_update_space(ring);
+
+ set_pp_dir(engine);
+
+ /* First wake the ring up to an empty/idle ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
+
+ ENGINE_WRITE_FW(engine, RING_CTL,
+ RING_CTL_SIZE(ring->size) | RING_VALID);
+
+ /* If the ring does not report itself as valid, it is dead */
+ if (__intel_wait_for_register_fw(engine->uncore,
+ RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 5000, 0, NULL))
+ goto err;
+
+ if (GRAPHICS_VER(engine->i915) > 2)
+ ENGINE_WRITE_FW(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
+ }
+
+ /* Papering over lost _interrupts_ immediately following the restart */
+ intel_engine_signal_breadcrumbs(engine);
+ return 0;
+
+err:
+ drm_err(&engine->i915->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
+}
+
+static void reset_prepare(struct intel_engine_cs *engine)
+{
+ /*
+ * We stop engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * a system hang if a batchbuffer is progressing when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus assume it is best to stop the engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ * WaClearRingBufHeadRegAtInit:ctg,elk
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ ENGINE_TRACE(engine, "\n");
+ intel_engine_stop_cs(engine);
+
+ if (!stop_ring(engine)) {
+ /* G45 ring initialization often fails to reset head to zero */
+ ENGINE_TRACE(engine,
+ "HEAD not reset to zero, "
+ "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
+ }
+}
+
+static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ struct i915_request *pos, *rq;
+ unsigned long flags;
+ u32 head;
+
+ rq = NULL;
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ rcu_read_lock();
+ list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
+ if (!__i915_request_is_complete(pos)) {
+ rq = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * The guilty request will get skipped on a hung engine.
+ *
+ * Users of client default contexts do not rely on logical
+ * state preserved between batches so it is safe to execute
+ * queued requests following the hang. Non default contexts
+ * rely on preserved state, so skipping a batch loses the
+ * evolution of the state and it needs to be considered corrupted.
+ * Executing more queued batches on top of corrupted state is
+ * risky. But we take the risk by trying to advance through
+ * the queued requests in order to make the client behaviour
+ * more predictable around resets, by not throwing away a random
+ * number of batches it has prepared for execution. Sophisticated
+ * clients can use gem_reset_stats_ioctl and dma fence status
+ * (exported via sync_file info ioctl on explicit fences) to observe
+ * when it loses the context state and should rebuild accordingly.
+ *
+ * The context ban, and ultimately the client ban, mechanism are safety
+ * valves if client submission ends up resulting in nothing more than
+ * subsequent hangs.
+ */
+
+ if (rq) {
+ /*
+ * Try to restore the logical GPU state to match the
+ * continuation of the request queue. If we skip the
+ * context/PD restore, then the next request may try to execute
+ * assuming that its context is valid and loaded on the GPU and
+ * so may try to access invalid memory, prompting repeated GPU
+ * hangs.
+ *
+ * If the request was guilty, we still restore the logical
+ * state in case the next request requires it (e.g. the
+ * aliasing ppgtt), but skip over the hung batch.
+ *
+ * If the request was innocent, we try to replay the request
+ * with the restored context.
+ */
+ __i915_request_reset(rq, stalled);
+
+ GEM_BUG_ON(rq->ring != engine->legacy.ring);
+ head = rq->head;
+ } else {
+ head = engine->legacy.ring->tail;
+ }
+ engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void reset_cancel(struct intel_engine_cs *engine)
+{
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
+ i915_request_put(i915_request_mark_eio(request));
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+static void i9xx_submit_request(struct i915_request *request)
+{
+ i915_request_submit(request);
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
+
+ ENGINE_WRITE(request->engine, RING_TAIL,
+ intel_ring_set_tail(request->ring, request->tail));
+}
+
+static void __ring_context_fini(struct intel_context *ce)
+{
+ i915_vma_put(ce->state);
+}
+
+static void ring_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __ring_context_fini(ce);
+
+ intel_context_fini(ce);
+ intel_context_free(ce);
+}
+
+static int ring_context_init_default_state(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ shmem_read(ce->engine->default_state, 0,
+ vaddr, ce->engine->context_size);
+
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
+
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ return 0;
+}
+
+static int ring_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **unused)
+{
+ struct i915_address_space *vm;
+ int err = 0;
+
+ if (ce->engine->default_state &&
+ !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
+ err = ring_context_init_default_state(ce, ww);
+ if (err)
+ return err;
+ }
+
+ vm = vm_alias(ce->vm);
+ if (vm)
+ err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+
+ vm = vm_alias(ce->vm);
+ if (vm)
+ gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
+}
+
+static void ring_context_unpin(struct intel_context *ce)
+{
+}
+
+static void ring_context_post_unpin(struct intel_context *ce)
+{
+ __context_unpin_ppgtt(ce);
+}
+
+static struct i915_vma *
+alloc_context_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_shmem(i915, engine->context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int ring_context_alloc(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+
+ /* One ringbuffer to rule them all */
+ GEM_BUG_ON(!engine->legacy.ring);
+ ce->ring = engine->legacy.ring;
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
+
+ GEM_BUG_ON(ce->state);
+ if (engine->context_size) {
+ struct i915_vma *vma;
+
+ vma = alloc_context_vma(engine);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ ce->state = vma;
+ }
+
+ return 0;
+}
+
+static int ring_context_pin(struct intel_context *ce, void *unused)
+{
+ return 0;
+}
+
+static void ring_context_reset(struct intel_context *ce)
+{
+ intel_ring_reset(ce->ring, ce->ring->emit);
+ clear_bit(CONTEXT_VALID_BIT, &ce->flags);
+}
+
+static void ring_context_revoke(struct intel_context *ce,
+ struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
+{
+ struct intel_engine_cs *engine;
+
+ if (!rq || !i915_request_is_active(rq))
+ return;
+
+ engine = rq->engine;
+ lockdep_assert_held(&engine->sched_engine->lock);
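+	/* Error out the remaining queued requests for this context so none run after the hang */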
+ list_for_each_entry_continue(rq, &engine->sched_engine->requests,
+ sched.link)
+ if (rq->context == ce) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
+}
+
+static void ring_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = NULL;
+
+ i915_request_active_engine(rq, &engine);
+
+ if (engine && intel_engine_pulse(engine))
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "request cancellation by %s",
+ current->comm);
+}
+
+static const struct intel_context_ops ring_context_ops = {
+ .alloc = ring_context_alloc,
+
+ .cancel_request = ring_context_cancel_request,
+
+ .revoke = ring_context_revoke,
+
+ .pre_pin = ring_context_pre_pin,
+ .pin = ring_context_pin,
+ .unpin = ring_context_unpin,
+ .post_unpin = ring_context_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = ring_context_reset,
+ .destroy = ring_context_destroy,
+};
+
+static int load_pd_dir(struct i915_request *rq,
+ struct i915_address_space *vm,
+ u32 valid)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
+ *cs++ = valid;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
+ *cs++ = pp_dir(vm);
+
+ /* Stall until the page table load is complete? */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+
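+	/* Force a TLB invalidate so the new page directory takes effect */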
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
+
+ intel_ring_advance(rq, cs);
+
+ return rq->engine->emit_flush(rq, EMIT_FLUSH);
+}
+
+static int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *i915 = engine->i915;
+ enum intel_engine_id id;
+ const int num_engines =
+ IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
+ bool force_restore = false;
+ int len;
+ u32 *cs;
+
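+	/* MI_SET_CONTEXT + context address, bracketed by a pair of MI_NOOPs */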
+ len = 4;
+ if (GRAPHICS_VER(i915) == 7)
+ len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
+ else if (GRAPHICS_VER(i915) == 5)
+ len += 2;
+ if (flags & MI_FORCE_RESTORE) {
+ GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+ flags &= ~MI_FORCE_RESTORE;
+ force_restore = true;
+ len += 2;
+ }
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (GRAPHICS_VER(i915) == 7) {
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ if (num_engines) {
+ struct intel_engine_cs *signaller;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
+ for_each_engine(signaller, engine->gt, id) {
+ if (signaller == engine)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+ }
+ } else if (GRAPHICS_VER(i915) == 5) {
+ /*
+ * This w/a is only listed for pre-production ilk a/b steppings,
+ * but is also mentioned for programming the powerctx. To be
+ * safe, just apply the workaround; we do not use SyncFlush so
+ * this should never take effect and so be a no-op!
+ */
+ *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
+ }
+
+ if (force_restore) {
+ /*
+ * The HW doesn't handle being told to restore the current
+		 * context very well. Quite often it likes to go off and
+ * sulk, especially when it is meant to be reloading PP_DIR.
+ * A very simple fix to force the reload is to simply switch
+ * away from the current context and back again.
+ *
+ * Note that the kernel_context will contain random state
+ * following the INHIBIT_RESTORE. We accept this since we
+ * never use the kernel_context state; it is merely a
+ * placeholder we use to flush other contexts.
+ */
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
+ MI_MM_SPACE_GTT |
+ MI_RESTORE_INHIBIT;
+ }
+
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
+ *cs++ = MI_NOOP;
+
+ if (GRAPHICS_VER(i915) == 7) {
+ if (num_engines) {
+ struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
+ for_each_engine(signaller, engine->gt, id) {
+ if (signaller == engine)
+ continue;
+
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ /* Insert a delay before the next switch! */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ } else if (GRAPHICS_VER(i915) == 5) {
+ *cs++ = MI_SUSPEND_FLUSH;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int remap_l3_slice(struct i915_request *rq, int slice)
+{
+#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
+ u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
+ int i;
+
+ if (!remap_info)
+ return 0;
+
+ cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
+ for (i = 0; i < L3LOG_DW; i++) {
+ *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+ *cs++ = remap_info[i];
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+#undef L3LOG_DW
+}
+
+static int remap_l3(struct i915_request *rq)
+{
+ struct i915_gem_context *ctx = i915_request_gem_context(rq);
+ int i, err;
+
+ if (!ctx || !ctx->remap_slice)
+ return 0;
+
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(ctx->remap_slice & BIT(i)))
+ continue;
+
+ err = remap_l3_slice(rq, i);
+ if (err)
+ return err;
+ }
+
+ ctx->remap_slice = 0;
+ return 0;
+}
+
+static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+{
+ int ret;
+
+ if (!vm)
+ return 0;
+
+ ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /*
+ * Not only do we need a full barrier (post-sync write) after
+ * invalidating the TLBs, but we need to wait a little bit
+ * longer. Whether this is merely delaying us, or the
+ * subsequent flush is a key part of serialising with the
+ * post-sync op, this extra pass appears vital before a
+ * mm switch!
+ */
+ ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
+ if (ret)
+ return ret;
+
+ return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
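+	/* Run the w/a batch to clear residual state left by the previous context */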
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
+static int switch_context(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct intel_context *ce = rq->context;
+ void **residuals = NULL;
+ int ret;
+
+ GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
+
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce &&
+ i915_mitigate_clear_residuals()) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
+ ret = switch_mm(rq, vm_alias(ce->vm));
+ if (ret)
+ return ret;
+
+ if (ce->state) {
+ u32 flags;
+
+ GEM_BUG_ON(engine->id != RCS0);
+
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ else
+ flags |= MI_RESTORE_INHIBIT;
+
+ ret = mi_set_context(rq, ce, flags);
+ if (ret)
+ return ret;
+ }
+
+ ret = remap_l3(rq);
+ if (ret)
+ return ret;
+
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+	 * Or at least this preamble will be emitted; the request may be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
+ return 0;
+}
+
+static int ring_request_alloc(struct i915_request *request)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
+ GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += LEGACY_REQUEST_SIZE;
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ ret = switch_context(request);
+ if (ret)
+ return ret;
+
+ request->reserved_space -= LEGACY_REQUEST_SIZE;
+ return 0;
+}
+
+static void gen6_bsd_submit_request(struct i915_request *request)
+{
+ struct intel_uncore *uncore = request->engine->uncore;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
+ _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (__intel_wait_for_register_fw(uncore,
+ RING_PSMI_CTL(GEN6_BSD_RING_BASE),
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 1000, 0, NULL))
+ drm_err(&uncore->i915->drm,
+ "timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ i9xx_submit_request(request);
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
+ _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+static void i9xx_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = i9xx_submit_request;
+}
+
+static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = gen6_bsd_submit_request;
+}
+
+static void ring_release(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
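+	/* The engine should already be idle (ring stopped) before release */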
+ drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ intel_engine_cleanup_common(engine);
+
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
+
+ intel_timeline_unpin(engine->legacy.timeline);
+ intel_timeline_put(engine->legacy.timeline);
+}
+
+static void irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ intel_engine_signal_breadcrumbs(engine);
+}
+
+static void setup_irq(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ intel_engine_set_irq_handler(engine, irq_handler);
+
+ if (GRAPHICS_VER(i915) >= 6) {
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+ } else if (GRAPHICS_VER(i915) >= 5) {
+ engine->irq_enable = gen5_irq_enable;
+ engine->irq_disable = gen5_irq_disable;
+ } else if (GRAPHICS_VER(i915) >= 3) {
+ engine->irq_enable = gen3_irq_enable;
+ engine->irq_disable = gen3_irq_disable;
+ } else {
+ engine->irq_enable = gen2_irq_enable;
+ engine->irq_disable = gen2_irq_disable;
+ }
+}
+
+static void add_to_engine(struct i915_request *rq)
+{
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+ list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void remove_from_engine(struct i915_request *rq)
+{
+ spin_lock_irq(&rq->engine->sched_engine->lock);
+ list_del_init(&rq->sched.link);
+
+ /* Prevent further __await_execution() registering a cb, then flush */
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+ spin_unlock_irq(&rq->engine->sched_engine->lock);
+
+ i915_request_notify_execute_cb_imm(rq);
+}
+
+static void setup_common(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ /* gen8+ are only supported with execlists */
+ GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
+
+ setup_irq(engine);
+
+ engine->resume = xcs_resume;
+ engine->sanitize = xcs_sanitize;
+
+ engine->reset.prepare = reset_prepare;
+ engine->reset.rewind = reset_rewind;
+ engine->reset.cancel = reset_cancel;
+ engine->reset.finish = reset_finish;
+
+ engine->add_active_request = add_to_engine;
+ engine->remove_active_request = remove_from_engine;
+
+ engine->cops = &ring_context_ops;
+ engine->request_alloc = ring_request_alloc;
+
+ /*
+ * Using a global execution timeline; the previous final breadcrumb is
+	 * equivalent to our next initial breadcrumb so we can elide
+ * engine->emit_init_breadcrumb().
+ */
+ engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
+ if (GRAPHICS_VER(i915) == 5)
+ engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
+
+ engine->set_default_submission = i9xx_set_default_submission;
+
+ if (GRAPHICS_VER(i915) >= 6)
+ engine->emit_bb_start = gen6_emit_bb_start;
+ else if (GRAPHICS_VER(i915) >= 4)
+ engine->emit_bb_start = gen4_emit_bb_start;
+ else if (IS_I830(i915) || IS_I845G(i915))
+ engine->emit_bb_start = i830_emit_bb_start;
+ else
+ engine->emit_bb_start = gen3_emit_bb_start;
+}
+
+static void setup_rcs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (HAS_L3_DPF(i915))
+ engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+
+ if (GRAPHICS_VER(i915) >= 7) {
+ engine->emit_flush = gen7_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
+ } else if (GRAPHICS_VER(i915) == 6) {
+ engine->emit_flush = gen6_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
+ } else if (GRAPHICS_VER(i915) == 5) {
+ engine->emit_flush = gen4_emit_flush_rcs;
+ } else {
+ if (GRAPHICS_VER(i915) < 4)
+ engine->emit_flush = gen2_emit_flush;
+ else
+ engine->emit_flush = gen4_emit_flush_rcs;
+ engine->irq_enable_mask = I915_USER_INTERRUPT;
+ }
+
+ if (IS_HASWELL(i915))
+ engine->emit_bb_start = hsw_emit_bb_start;
+}
+
+static void setup_vcs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (GRAPHICS_VER(i915) >= 6) {
+ /* gen6 bsd needs a special wa for tail updates */
+ if (GRAPHICS_VER(i915) == 6)
+ engine->set_default_submission = gen6_bsd_set_default_submission;
+ engine->emit_flush = gen6_emit_flush_vcs;
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+
+ if (GRAPHICS_VER(i915) == 6)
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
+ else
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
+ } else {
+ engine->emit_flush = gen4_emit_flush_vcs;
+ if (GRAPHICS_VER(i915) == 5)
+ engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ else
+ engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ }
+}
+
+static void setup_bcs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ engine->emit_flush = gen6_emit_flush_xcs;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+
+ if (GRAPHICS_VER(i915) == 6)
+ engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
+ else
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
+}
+
+static void setup_vecs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
+
+ engine->emit_flush = gen6_emit_flush_xcs;
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_enable = hsw_irq_enable_vecs;
+ engine->irq_disable = hsw_irq_disable_vecs;
+
+ engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
+}
+
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ struct i915_vma *vma)
+{
+ int err;
+
+ err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ return err;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size, err;
+
+ if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
+ return NULL;
+
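+	/* Probe for the size of the w/a batch; a zero result means none is needed */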
+ err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (!err)
+ return NULL;
+
+ size = ALIGN(err, PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vma);
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ vma->private = NULL;
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+int intel_ring_submission_setup(struct intel_engine_cs *engine)
+{
+ struct i915_gem_ww_ctx ww;
+ struct intel_timeline *timeline;
+ struct intel_ring *ring;
+ struct i915_vma *gen7_wa_vma;
+ int err;
+
+ setup_common(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ setup_rcs(engine);
+ break;
+ case VIDEO_DECODE_CLASS:
+ setup_vcs(engine);
+ break;
+ case COPY_ENGINE_CLASS:
+ setup_bcs(engine);
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ setup_vecs(engine);
+ break;
+ default:
+ MISSING_CASE(engine->class);
+ return -ENODEV;
+ }
+
+ timeline = intel_timeline_create_from_engine(engine,
+ I915_GEM_HWS_SEQNO_ADDR);
+ if (IS_ERR(timeline)) {
+ err = PTR_ERR(timeline);
+ goto err;
+ }
+ GEM_BUG_ON(timeline->has_initial_breadcrumb);
+
+ ring = intel_engine_create_ring(engine, SZ_16K);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err_timeline;
+ }
+
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+ engine->legacy.timeline = timeline;
+
+ gen7_wa_vma = gen7_ctx_vma(engine);
+ if (IS_ERR(gen7_wa_vma)) {
+ err = PTR_ERR(gen7_wa_vma);
+ goto err_ring;
+ }
+
+ i915_gem_ww_ctx_init(&ww, false);
+
+retry:
+ err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
+ if (!err && gen7_wa_vma)
+ err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(timeline, &ww);
+ if (!err) {
+ err = intel_ring_pin(ring, &ww);
+ if (err)
+ intel_timeline_unpin(timeline);
+ }
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+
+ if (gen7_wa_vma) {
+ err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
+ if (err) {
+ intel_ring_unpin(ring);
+ intel_timeline_unpin(timeline);
+ }
+ }
+
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err)
+ goto err_gen7_put;
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->release = ring_release;
+
+ return 0;
+
+err_gen7_put:
+ if (gen7_wa_vma) {
+ intel_context_put(gen7_wa_vma->private);
+ i915_gem_object_put(gen7_wa_vma->obj);
+ }
+err_ring:
+ intel_ring_put(ring);
+err_timeline:
+ intel_timeline_put(timeline);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
new file mode 100644
index 000000000..49ccb76dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_TYPES_H
+#define INTEL_RING_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/*
+ * Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication of some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+
+struct i915_vma;
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
+ u32 head; /* updated during retire, loosely tracks RING_HEAD */
+ u32 tail; /* updated on submission, used for RING_TAIL */
+ u32 emit; /* updated during request construction */
+
+ u32 space;
+ u32 size;
+ u32 wrap;
+ u32 effective_size;
+};
+
+#endif /* INTEL_RING_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000..6b86250c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,2591 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_breadcrumbs.h"
+#include "intel_gt.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_gt_regs.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+#include "intel_rps.h"
+#include "vlv_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+#define BUSY_MAX_EI 20u /* ms */
+
+/*
+ * Lock protecting IPS related data structures
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+ return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->uncore;
+}
+
+static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ return &gt->uc.guc.slpc;
+}
+
+static bool rps_uses_slpc(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ return intel_uc_uses_guc_slpc(&gt->uc);
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+ return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void rps_timer(struct timer_list *t)
+{
+ struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_engine_cs *engine;
+ ktime_t dt, last, timestamp;
+ enum intel_engine_id id;
+ s64 max_busy[3] = {};
+
+ timestamp = 0;
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ s64 busy;
+ int i;
+
+ dt = intel_engine_get_busy_time(engine, &timestamp);
+ last = engine->stats.rps;
+ engine->stats.rps = dt;
+
+ busy = ktime_to_ns(ktime_sub(dt, last));
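+		/* Track the three busiest engine deltas, kept in descending order */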
+ for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
+ if (busy > max_busy[i])
+ swap(busy, max_busy[i]);
+ }
+ }
+ last = rps->pm_timestamp;
+ rps->pm_timestamp = timestamp;
+
+ if (intel_rps_is_active(rps)) {
+ s64 busy;
+ int i;
+
+ dt = ktime_sub(timestamp, last);
+
+ /*
+ * Our goal is to evaluate each engine independently, so we run
+ * at the lowest clocks required to sustain the heaviest
+ * workload. However, a task may be split into sequential
+ * dependent operations across a set of engines, such that
+ * the independent contributions do not account for high load,
+ * but overall the task is GPU bound. For example, consider
+ * video decode on vcs followed by colour post-processing
+ * on vecs, followed by general post-processing on rcs.
+		 * Since multiple engines being active may imply a single
+		 * continuous workload across all engines, we hedge our
+		 * bets by contributing only a fraction of the distributed
+		 * load into our busyness calculation.
+ */
+ busy = max_busy[0];
+ for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
+ if (!max_busy[i])
+ break;
+
+ busy += div_u64(max_busy[i], 1 << i);
+ }
+ GT_TRACE(rps_to_gt(rps),
+ "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
+ busy, (int)div64_u64(100 * busy, dt),
+ max_busy[0], max_busy[1], max_busy[2],
+ rps->pm_interval);
+
+ if (100 * busy > rps->power.up_threshold * dt &&
+ rps->cur_freq < rps->max_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else if (100 * busy < rps->power.down_threshold * dt &&
+ rps->cur_freq > rps->min_freq_softlimit) {
+ rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
+ rps->pm_interval = 1;
+ schedule_work(&rps->work);
+ } else {
+ rps->last_adj = 0;
+ }
+
+ mod_timer(&rps->timer,
+ jiffies + msecs_to_jiffies(rps->pm_interval));
+ rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
+ }
+}
+
+static void rps_start_timer(struct intel_rps *rps)
+{
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ rps->pm_interval = 1;
+ mod_timer(&rps->timer, jiffies + 1);
+}
+
+static void rps_stop_timer(struct intel_rps *rps)
+{
+ del_timer_sync(&rps->timer);
+ rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
+ cancel_work_sync(&rps->work);
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+ u32 mask = 0;
+
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+ if (val > rps->min_freq_softlimit)
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ if (val < rps->max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+ mask &= rps->pm_events;
+
+ return rps_pm_sanitize_mask(rps, ~mask);
+}
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+ memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ GEM_BUG_ON(rps_uses_slpc(rps));
+
+ GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
+ rps->pm_events, rps_pm_mask(rps, rps->last_freq));
+
+ rps_reset_ei(rps);
+
+ spin_lock_irq(gt->irq_lock);
+ gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ spin_unlock_irq(gt->irq_lock);
+
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+ gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+ while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+ ;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ spin_lock_irq(gt->irq_lock);
+ if (GRAPHICS_VER(gt->i915) >= 11)
+ gen11_rps_reset_interrupts(rps);
+ else
+ gen6_rps_reset_interrupts(rps);
+
+ rps->pm_iir = 0;
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ intel_uncore_write(gt->uncore,
+ GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+
+ spin_lock_irq(gt->irq_lock);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(gt->irq_lock);
+
+ intel_synchronize_irq(gt->i915);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+
+ rps_reset_interrupts(rps);
+ GT_TRACE(gt, "interrupts:off\n");
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fmax, fmin, fstart;
+ u32 rgvmodectl;
+ int c_m, i;
+
+ if (i915->fsb_freq <= 3200)
+ c_m = 0;
+ else if (i915->fsb_freq <= 4800)
+ c_m = 1;
+ else
+ c_m = 2;
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+ rps->ips.m = cparams[i].m;
+ rps->ips.c = cparams[i].c;
+ break;
+ }
+ }
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
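+	/* Note the inversion: the FMAX field maps to the minimum frequency, FMIN to the maximum */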
+ rps->min_freq = fmax;
+ rps->efficient_freq = fstart;
+ rps->max_freq = fmin;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ unsigned long now = jiffies_to_msecs(jiffies), dt;
+ unsigned long result;
+ u64 total, delta;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ /*
+ * Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ dt = now - ips->last_time1;
+ if (dt <= 10)
+ return ips->chipset_power;
+
+ /* FIXME: handle per-counter overflow */
+ total = intel_uncore_read(uncore, DMIEC);
+ total += intel_uncore_read(uncore, DDREC);
+ total += intel_uncore_read(uncore, CSIEC);
+
+ delta = total - ips->last_count1;
+
+ result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+ ips->last_count1 = total;
+ ips->last_time1 = now;
+
+ ips->chipset_power = result;
+
+ return result;
+}
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+ unsigned int m, x, b;
+ u32 tsfs;
+
+ tsfs = intel_uncore_read(uncore, TSFS);
+ x = intel_uncore_read8(uncore, TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+ m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+ return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+ const int vd = _pxvid_to_vd(pxvid);
+
+ if (INTEL_INFO(i915)->is_mobile)
+ return max(vd - 1125, 0);
+
+ return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ u64 now, delta, dt;
+ u32 count;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ now = ktime_get_raw_ns();
+ dt = now - ips->last_time2;
+ do_div(dt, NSEC_PER_MSEC);
+
+ /* Don't divide by 0 */
+ if (dt <= 10)
+ return;
+
+ count = intel_uncore_read(uncore, GFXEC);
+ delta = count - ips->last_count2;
+
+ ips->last_count2 = count;
+ ips->last_time2 = now;
+
+ /* More magic constants... */
+ ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+ spin_lock_irq(&mchdev_lock);
+ __gen5_ips_update(&rps->ips);
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned int gen5_invert_freq(struct intel_rps *rps,
+ unsigned int val)
+{
+ /* Invert the frequency bin into an ips delay */
+ val = rps->max_freq - val;
+ val = rps->min_freq + val;
+
+ return val;
+}
+
+static int __gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return -EBUSY; /* still busy with another command */
+ }
+
+ /* Invert the frequency bin into an ips delay */
+ val = gen5_invert_freq(rps, val);
+
+ rgvswctl =
+ (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) |
+ MEMCTL_SFCAVM;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+ return 0;
+}
+
+static int gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ spin_lock_irq(&mchdev_lock);
+ err = __gen5_rps_set(rps, val);
+ spin_unlock_irq(&mchdev_lock);
+
+ return err;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ return div * 133333 / (pre << post);
+}
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ intel_uncore_write(uncore, ECR, 0);
+ intel_uncore_posting_read(uncore, ECR);
+
+ /* Program energy weights for various events */
+ intel_uncore_write(uncore, SDEW, 0x15040d00);
+ intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+ intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+ intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ intel_uncore_write(uncore, PEW(i), 0);
+ for (i = 0; i < 3; i++)
+ intel_uncore_write(uncore, DEW(i), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+ unsigned int freq = intel_pxfreq(pxvidfreq);
+ unsigned int vid =
+ (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+ unsigned int val;
+
+ val = vid * vid * freq / 1000 * 255;
+ val /= 127 * 127 * 900;
+
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ intel_uncore_write(uncore, PXW(i),
+ pxw[i * 4 + 0] << 24 |
+ pxw[i * 4 + 1] << 16 |
+ pxw[i * 4 + 2] << 8 |
+ pxw[i * 4 + 3] << 0);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ intel_uncore_write(uncore, OGW0, 0);
+ intel_uncore_write(uncore, OGW1, 0);
+ intel_uncore_write(uncore, EG0, 0x00007f00);
+ intel_uncore_write(uncore, EG1, 0x0000000e);
+ intel_uncore_write(uncore, EG2, 0x000e0000);
+ intel_uncore_write(uncore, EG3, 0x68000300);
+ intel_uncore_write(uncore, EG4, 0x42000000);
+ intel_uncore_write(uncore, EG5, 0x00140031);
+ intel_uncore_write(uncore, EG6, 0);
+ intel_uncore_write(uncore, EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ intel_uncore_write(uncore, PXWL(i), 0);
+
+ /* Enable PMON + select events */
+ intel_uncore_write(uncore, ECR, 0x80000019);
+
+ return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fstart, vstart;
+ u32 rgvmodectl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Enable temp reporting */
+ intel_uncore_write16(uncore, PMMISC,
+ intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1,
+ intel_uncore_read16(uncore, TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+ intel_uncore_write(uncore, MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
+ drm_err(&uncore->i915->drm,
+ "stuck trying to change perf mode\n");
+ mdelay(1);
+
+ __gen5_rps_set(rps, rps->cur_freq);
+
+ rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+ rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+ rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+ rps->ips.last_time2 = ktime_get_raw_ns();
+
+ spin_lock(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PCU_EVENT);
+ spin_unlock(&i915->irq_lock);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ rps->ips.corr = init_emon(uncore);
+
+ return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ spin_lock(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PCU_EVENT);
+ spin_unlock(&i915->irq_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ intel_uncore_write(uncore, MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+
+ /* Go back to the starting frequency */
+ __gen5_rps_set(rps, rps->idle_freq);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+ u32 limits;
+
+ /*
+	 * Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts; otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: there's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency; if the down threshold expires in that window we will not
+	 * receive a down interrupt.
+ */
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ limits = rps->max_freq_softlimit << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 14;
+ } else {
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
+ }
+
+ return limits;
+}
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
+
+ lockdep_assert_held(&rps->power.mutex);
+
+ if (new_power == rps->power.mode)
+ return;
+
+ threshold_up = 95;
+ threshold_down = 85;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ ei_up = 16000;
+ ei_down = 32000;
+ break;
+
+ case BETWEEN:
+ ei_up = 13000;
+ ei_down = 32000;
+ break;
+
+ case HIGH_POWER:
+ ei_up = 10000;
+ ei_down = 32000;
+ break;
+ }
+
+	/* When byt can survive dynamic sw freq adjustments without
+	 * hanging the system, this restriction can be lifted.
+	 */
+ if (IS_VALLEYVIEW(gt->i915))
+ goto skip_hw_write;
+
+ GT_TRACE(gt,
+ "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
+ new_power, threshold_up, ei_up, threshold_down, ei_down);
+
+ set(uncore, GEN6_RP_UP_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
+ set(uncore, GEN6_RP_UP_THRESHOLD,
+ intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
+
+ set(uncore, GEN6_RP_DOWN_EI,
+ intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
+ set(uncore, GEN6_RP_DOWN_THRESHOLD,
+ intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
+
+ set(uncore, GEN6_RP_CONTROL,
+ (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(rps, new_power);
+ mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+ GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
+ str_yes_no(interactive));
+
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && intel_rps_is_active(rps))
+ rps_set_power(rps, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 swreq;
+
+ GEM_BUG_ON(rps_uses_slpc(rps));
+
+ if (GRAPHICS_VER(i915) >= 9)
+ swreq = GEN9_FREQUENCY(val);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ swreq = HSW_FREQUENCY(val);
+ else
+ swreq = (GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ set(uncore, GEN6_RPNSWREQ, swreq);
+
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
+ val, intel_gpu_freq(rps, val), swreq);
+
+ return 0;
+}
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ vlv_punit_get(i915);
+ err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(i915);
+
+ GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
+ val, intel_gpu_freq(rps, val));
+
+ return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val, bool update)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ if (val == rps->last_freq)
+ return 0;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_rps_set(rps, val);
+ else if (GRAPHICS_VER(i915) >= 6)
+ err = gen6_rps_set(rps, val);
+ else
+ err = gen5_rps_set(rps, val);
+ if (err)
+ return err;
+
+ if (update && GRAPHICS_VER(i915) >= 6)
+ gen6_rps_set_thresholds(rps, val);
+ rps->last_freq = val;
+
+ return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+ if (!intel_rps_is_enabled(rps))
+ return;
+
+ GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
+
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ mutex_lock(&rps->lock);
+
+ intel_rps_set_active(rps);
+ intel_rps_set(rps,
+ clamp(rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit));
+
+ mutex_unlock(&rps->lock);
+
+ rps->pm_iir = 0;
+ if (intel_rps_has_interrupts(rps))
+ rps_enable_interrupts(rps);
+ if (intel_rps_uses_timer(rps))
+ rps_start_timer(rps);
+
+ if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ int adj;
+
+ if (!intel_rps_is_enabled(rps))
+ return;
+
+ if (!intel_rps_clear_active(rps))
+ return;
+
+ if (intel_rps_uses_timer(rps))
+ rps_stop_timer(rps);
+ if (intel_rps_has_interrupts(rps))
+ rps_disable_interrupts(rps);
+
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+	 * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq, false);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+ /*
+ * Since we will try and restart from the previously requested
+ * frequency on unparking, treat this idle point as a downclock
+ * interrupt and reduce the frequency for resume. If we park/unpark
+ * more frequently than the rps worker can run, we will not respond
+ * to any EI and never see a change in frequency.
+ *
+ * (Note we accommodate Cherryview's limitation of only using an
+ * even bin by applying it to all.)
+ */
+ adj = rps->last_adj;
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = -2;
+ rps->last_adj = adj;
+ rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+ if (rps->cur_freq < rps->efficient_freq) {
+ rps->cur_freq = rps->efficient_freq;
+ rps->last_adj = 0;
+ }
+
+ GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
+}
+
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return slpc->boost_freq;
+ } else {
+ return intel_gpu_freq(rps, rps->boost_freq);
+ }
+}
+
+static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
+{
+ bool boost = false;
+
+ /* Validate against (static) hardware limits */
+ val = intel_freq_opcode(rps, val);
+ if (val < rps->min_freq || val > rps->max_freq)
+ return -EINVAL;
+
+ mutex_lock(&rps->lock);
+ if (val != rps->boost_freq) {
+ rps->boost_freq = val;
+ boost = atomic_read(&rps->num_waiters);
+ }
+ mutex_unlock(&rps->lock);
+ if (boost)
+ schedule_work(&rps->work);
+
+ return 0;
+}
+
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ return intel_guc_slpc_set_boost_freq(slpc, freq);
+ } else {
+ return rps_set_boost_freq(rps, freq);
+ }
+}
+
+void intel_rps_dec_waiters(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+ intel_guc_slpc_dec_waiters(slpc);
+ } else {
+ atomic_dec(&rps->num_waiters);
+ }
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_guc_slpc *slpc;
+
+ if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
+ return;
+
+ /* Serializes with i915_request_retire() */
+ if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
+
+ if (rps_uses_slpc(rps)) {
+ slpc = rps_to_slpc(rps);
+
+			/* Kick the boost work only if we are the first waiter */
+ if (!atomic_fetch_inc(&slpc->num_waiters))
+ schedule_work(&slpc->boost_work);
+
+ return;
+ }
+
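+		/* Only the first waiter needs to kick the RPS worker */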
+ if (atomic_fetch_inc(&rps->num_waiters))
+ return;
+
+ if (!intel_rps_is_active(rps))
+ return;
+
+ GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+ rq->fence.context, rq->fence.seqno);
+
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
+ }
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (intel_rps_is_active(rps)) {
+ err = rps_set(rps, val, true);
+ if (err)
+ return err;
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ if (intel_rps_has_interrupts(rps)) {
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ set(uncore,
+ GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
+
+ set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
+ }
+ }
+
+ rps->cur_freq = val;
+ return 0;
+}
+
+static u32 intel_rps_read_state_cap(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ if (IS_PONTEVECCHIO(i915))
+ return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
+ else if (IS_XEHPSDV(i915))
+ return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
+ else if (IS_GEN9_LP(i915))
+ return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ else
+ return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+}
+
+/**
+ * gen6_rps_get_freq_caps - Get freq caps exposed by HW
+ * @rps: the intel_rps structure
+ * @caps: returned freq caps
+ *
+ * Returned "caps" frequencies should be converted to MHz using
+ * intel_gpu_freq()
+ */
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 rp_state_cap;
+
+ rp_state_cap = intel_rps_read_state_cap(rps);
+
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+ if (IS_GEN9_LP(i915)) {
+ caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ if (GRAPHICS_VER(i915) >= 10)
+ caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
+ intel_uncore_read(to_gt(i915)->uncore,
+ GEN10_FREQ_INFO_REC));
+ else
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
+ /*
+ * In this case rp_state_cap register reports frequencies in
+ * units of 50 MHz. Convert these to the actual "hw unit", i.e.
+ * units of 16.67 MHz
+ */
+ caps->rp0_freq *= GEN9_FREQ_SCALER;
+ caps->rp1_freq *= GEN9_FREQ_SCALER;
+ caps->min_freq *= GEN9_FREQ_SCALER;
+ }
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+ rps->rp0_freq = caps.rp0_freq;
+ rps->rp1_freq = caps.rp1_freq;
+ rps->min_freq = caps.min_freq;
+
+ /* hw_max = RP0 until we check for overclocking */
+ rps->max_freq = rps->rp0_freq;
+
+ rps->efficient_freq = rps->rp1_freq;
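+	/* Ask pcode for a more accurate efficient frequency where available */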
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
+ u32 ddcc_status = 0;
+ u32 mult = 1;
+
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
+ mult = GEN9_FREQ_SCALER;
+ if (snb_pcode_read(rps_to_gt(rps)->uncore,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status, NULL) == 0)
+ rps->efficient_freq =
+ clamp_t(u32,
+ ((ddcc_status >> 8) & 0xff) * mult,
+ rps->min_freq,
+ rps->max_freq);
+ }
+}
+
+static bool rps_reset(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ /* force a reset */
+ rps->power.mode = -1;
+ rps->last_freq = -1;
+
+ if (rps_set(rps, rps->min_freq, true)) {
+ drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
+ return false;
+ }
+
+ rps->cur_freq = rps->min_freq;
+ return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* Program defaults and thresholds for RPS */
+ if (GRAPHICS_VER(gt->i915) == 9)
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(rps->rp1_freq));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
+ return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(rps->rp1_freq));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
+ return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Power down if completely idle for over 50ms */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_gt *gt = rps_to_gt(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ switch (gt->info.sseu.eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+ break;
+ case 12:
+ /* (2 * 6) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+		/* Default to the (2 * 8) RP0 fuse value for any other EU combination */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+ break;
+ }
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+ return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+	/* 1: Program defaults and thresholds for RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 2: Enable RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ /* Setting Fixed Bias */
+ vlv_punit_get(i915);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
+
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n",
+ str_yes_no(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp1;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+ rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp0;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rpe;
+
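+	/* The RPe opcode is split across two fuses: bits 4:0 come from FMAX_FUSE_LO, the remaining bits from FMAX_FUSE_HI */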
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ /*
+ * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+ * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+ * a BYT-M B0 the above register contains 0xbf. Moreover when setting
+	 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
+ * to make sure it matches what Punit accepts.
+ */
+ return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+
+ vlv_punit_get(i915);
+
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
+
+ drm_dbg(&i915->drm, "GPLL enabled? %s\n",
+ str_yes_no(val & GPLLENABLE));
+ drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+ struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned int t, state1, state2;
+ u32 pxvid, ext_v;
+ u64 corr, corr2;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+ state1 = ext_v;
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ t = ips_mch_val(uncore);
+ if (t > 80)
+ corr = t * 2349 + 135940;
+ else if (t >= 50)
+ corr = t * 964 + 29317;
+ else /* < 50 */
+ corr = t * 301 + 1004;
+
+ corr = div_u64(corr * 150142 * state1, 10000) - 78642;
+ corr2 = div_u64(corr, 100000) * ips->corr;
+
+ state2 = div_u64(corr2 * state1, 10000);
+ state2 /= 100; /* convert to mW */
+
+ __gen5_ips_update(ips);
+
+ return ips->gfx_power + state2;
+}
+
+static bool has_busy_stats(struct intel_rps *rps)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, rps_to_gt(rps), id) {
+ if (!intel_engine_supports_stats(engine))
+ return false;
+ }
+
+ return true;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ bool enabled = false;
+
+ if (!HAS_RPS(i915))
+ return;
+
+ if (rps_uses_slpc(rps))
+ return;
+
+ intel_gt_check_clock_frequency(rps_to_gt(rps));
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (rps->max_freq <= rps->min_freq)
+ /* leave disabled, no room for dynamic reclocking */;
+ else if (IS_CHERRYVIEW(i915))
+ enabled = chv_rps_enable(rps);
+ else if (IS_VALLEYVIEW(i915))
+ enabled = vlv_rps_enable(rps);
+ else if (GRAPHICS_VER(i915) >= 9)
+ enabled = gen9_rps_enable(rps);
+ else if (GRAPHICS_VER(i915) >= 8)
+ enabled = gen8_rps_enable(rps);
+ else if (GRAPHICS_VER(i915) >= 6)
+ enabled = gen6_rps_enable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ enabled = gen5_rps_enable(rps);
+ else
+ MISSING_CASE(GRAPHICS_VER(i915));
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (!enabled)
+ return;
+
+ GT_TRACE(rps_to_gt(rps),
+ "min:%x, max:%x, freq:[%d, %d]\n",
+ rps->min_freq, rps->max_freq,
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->max_freq));
+
+ GEM_BUG_ON(rps->max_freq < rps->min_freq);
+ GEM_BUG_ON(rps->idle_freq > rps->max_freq);
+
+ GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
+ GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
+
+ if (has_busy_stats(rps))
+ intel_rps_set_timer(rps);
+ else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11)
+ intel_rps_set_interrupts(rps);
+ else
+ /* Ironlake currently uses intel_ips.ko */ {}
+
+ intel_rps_set_enabled(rps);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+ set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!intel_rps_is_enabled(rps))
+ return;
+
+ intel_rps_clear_enabled(rps);
+ intel_rps_clear_interrupts(rps);
+ intel_rps_clear_timer(rps);
+
+ if (GRAPHICS_VER(i915) >= 6)
+ gen6_rps_disable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
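+	/* gpll_ref_freq is in kHz (see vlv_init_gpll_ref_freq), so the /1000 yields MHz */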
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
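+	/* one factor of 2 undoes the 2x opcode encoding, the other halves CU2x to CU; /1000 converts kHz to MHz */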
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+ /* CHV needs even values */
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
+
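+/*
+ * Convert a frequency from platform "hw units" to MHz: gen9+ encodes in
+ * 16.67 MHz units (GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER), gen6-8 in
+ * 50 MHz units, while VLV/CHV derive the value from the GPLL reference clock.
+ */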
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (GRAPHICS_VER(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+ GEN9_FREQ_SCALER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_gpu_freq(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_gpu_freq(rps, val);
+ else if (GRAPHICS_VER(i915) >= 6)
+ return val * GT_FREQUENCY_MULTIPLIER;
+ else
+ return val;
+}
+
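+/* Inverse of intel_gpu_freq(): convert a frequency in MHz back to the platform's "hw unit" encoding */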
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (GRAPHICS_VER(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_freq_opcode(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_freq_opcode(rps, val);
+ else if (GRAPHICS_VER(i915) >= 6)
+ return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+ else
+ return val;
+}
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->gpll_ref_freq =
+ vlv_get_cck_clock(i915, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ i915->czclk_freq);
+
+ drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+ rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = vlv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
+
+ rps->efficient_freq = vlv_rps_rpe_freq(rps);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
+
+ rps->rp1_freq = vlv_rps_guar_freq(rps);
+ drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
+
+ rps->min_freq = vlv_rps_min_freq(rps);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = chv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
+
+ rps->efficient_freq = chv_rps_rpe_freq(rps);
+ drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
+
+ rps->rp1_freq = chv_rps_guar_freq(rps);
+ drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
+
+ rps->min_freq = chv_rps_min_freq(rps);
+ drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
+ rps->rp1_freq | rps->min_freq) & 1,
+ "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+ ei->ktime = ktime_get_raw();
+ ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+ ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ const struct intel_rps_ei *prev = &rps->ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(uncore, &now);
+
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
+
+ time = ktime_us_delta(now.ktime, prev->ktime);
+
+ time *= rps_to_i915(rps)->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
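+		/*
+		 * c0 and time are now in comparable scaled units, so this
+		 * effectively compares the busiest engine's %busy over the
+		 * interval against the current up/down thresholds.
+		 */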
+ if (c0 > time * rps->power.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * rps->power.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
+ rps->ei = now;
+ return events;
+}
+
+static void rps_work(struct work_struct *work)
+{
+ struct intel_rps *rps = container_of(work, typeof(*rps), work);
+ struct intel_gt *gt = rps_to_gt(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ bool client_boost = false;
+ int new_freq, adj, min, max;
+ u32 pm_iir = 0;
+
+ spin_lock_irq(gt->irq_lock);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
+ client_boost = atomic_read(&rps->num_waiters);
+ spin_unlock_irq(gt->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ if (!pm_iir && !client_boost)
+ goto out;
+
+ mutex_lock(&rps->lock);
+ if (!intel_rps_is_active(rps)) {
+ mutex_unlock(&rps->lock);
+ return;
+ }
+
+ pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+ adj = rps->last_adj;
+ new_freq = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
+ if (client_boost)
+ max = rps->max_freq;
+
+ GT_TRACE(gt,
+ "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
+ pm_iir, str_yes_no(client_boost),
+ adj, new_freq, min, max);
+
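+	/*
+	 * Autotune the next request: keep doubling the adjustment while the
+	 * same up/down threshold keeps firing, restart at +/-1 (+/-2 on CHV,
+	 * which needs even encodings) on a change of direction, and drop to
+	 * the efficient frequency on a down timeout.
+	 */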
+ if (client_boost && new_freq < rps->boost_freq) {
+ new_freq = rps->boost_freq;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+ if (new_freq >= rps->max_freq_softlimit)
+ adj = 0;
+ } else if (client_boost) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (rps->cur_freq > rps->efficient_freq)
+ new_freq = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_freq = rps->min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+ if (new_freq <= rps->min_freq_softlimit)
+ adj = 0;
+ } else { /* unknown event */
+ adj = 0;
+ }
+
+ /*
+ * sysfs frequency limits may have snuck in while
+ * servicing the interrupt
+ */
+ new_freq += adj;
+ new_freq = clamp_t(int, new_freq, min, max);
+
+ if (intel_rps_set(rps, new_freq)) {
+ drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
+ adj = 0;
+ }
+ rps->last_adj = adj;
+
+ mutex_unlock(&rps->lock);
+
+out:
+ spin_lock_irq(gt->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ const u32 events = rps->pm_events & pm_iir;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ GT_TRACE(gt, "irq events:%x\n", events);
+
+ gen6_gt_pm_mask_irq(gt, events);
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
+
+ events = pm_iir & rps->pm_events;
+ if (events) {
+ spin_lock(gt->irq_lock);
+
+ GT_TRACE(gt, "irq events:%x\n", events);
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
+ schedule_work(&rps->work);
+ spin_unlock(gt->irq_lock);
+ }
+
+ if (GRAPHICS_VER(gt->i915) >= 8)
+ return;
+
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_freq;
+
+ spin_lock(&mchdev_lock);
+
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
+
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ new_freq = rps->cur_freq;
+ if (busy_up > max_avg)
+ new_freq++;
+ else if (busy_down < min_avg)
+ new_freq--;
+ new_freq = clamp(new_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
+ rps->cur_freq = new_freq;
+
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init_early(struct intel_rps *rps)
+{
+ mutex_init(&rps->lock);
+ mutex_init(&rps->power.mutex);
+
+ INIT_WORK(&rps->work, rps_work);
+ timer_setup(&rps->timer, rps_timer, 0);
+
+ atomic_set(&rps->num_waiters, 0);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (rps_uses_slpc(rps))
+ return;
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rps_init(rps);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rps_init(rps);
+ else if (GRAPHICS_VER(i915) >= 6)
+ gen6_rps_init(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_init(rps);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ rps->max_freq_softlimit = rps->max_freq;
+ rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
+ rps->min_freq_softlimit = rps->min_freq;
+ rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ u32 params = 0;
+
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
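+		/* BIT(31) of the pcode reply signals OC support; bits 7:0 hold the overclocked max ratio in 50 MHz units */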
+ if (params & BIT(31)) { /* OC supported */
+ drm_dbg(&i915->drm,
+ "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ rps->max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ rps->boost_freq = rps->max_freq;
+ rps->idle_freq = rps->min_freq;
+
+ /* Start in the middle, from here we will autotune based on workload */
+ rps->cur_freq = rps->efficient_freq;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+	 * SNB, IVB and HSW can hard hang, and VLV and CHV may hard hang, on a
+	 * looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (GRAPHICS_VER(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+
+ /* GuC needs ARAT expired interrupt unmasked */
+ if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
+ rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+}
+
+void intel_rps_sanitize(struct intel_rps *rps)
+{
+ if (rps_uses_slpc(rps))
+ return;
+
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
+ rps_disable_interrupts(rps);
+}
+
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 cagf;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ cagf = (rpstat >> 8) & 0xff;
+ else if (GRAPHICS_VER(i915) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else if (GRAPHICS_VER(i915) >= 6)
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+ else
+ cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
+ MEMSTAT_PSTATE_SHIFT);
+
+ return cagf;
+}
+
+static u32 read_cagf(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 freq;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_punit_get(i915);
+ freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ } else {
+ freq = intel_uncore_read(uncore, MEMSTAT_ILK);
+ }
+
+ return intel_rps_get_cagf(rps, freq);
+}
+
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+{
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
+
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_gpu_freq(rps, read_cagf(rps));
+
+ return freq;
+}
+
+u32 intel_rps_read_punit_req(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
+
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+ return freq;
+}
+
+static u32 intel_rps_get_req(u32 pureq)
+{
+ u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
+
+ return req;
+}
+
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
+{
+ u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
+
+ return intel_gpu_freq(rps, freq);
+}
+
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
+{
+ if (rps_uses_slpc(rps))
+ return intel_rps_read_punit_req_frequency(rps);
+ else
+ return intel_gpu_freq(rps, rps->cur_freq);
+}
+
+u32 intel_rps_get_max_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return slpc->max_freq_softlimit;
+ else
+ return intel_gpu_freq(rps, rps->max_freq_softlimit);
+}
+
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in raw hw units.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in raw hardware units. On newer platforms these
+ * are units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return slpc->rp0_freq;
+ else
+ return intel_gpu_freq(rps, rps->rp0_freq);
+}
+
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return slpc->rp1_freq;
+ else
+ return intel_gpu_freq(rps, rps->rp1_freq);
+}
+
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return slpc->min_freq;
+ else
+ return intel_gpu_freq(rps, rps->min_freq);
+}
+
+static int set_max_freq(struct intel_rps *rps, u32 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int ret = 0;
+
+ mutex_lock(&rps->lock);
+
+ val = intel_freq_opcode(rps, val);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val < rps->min_freq_softlimit) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (val > rps->rp0_freq)
+ drm_dbg(&i915->drm, "User requested overclocking to %d\n",
+ intel_gpu_freq(rps, val));
+
+ rps->max_freq_softlimit = val;
+
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ /*
+ * We still need *_set_rps to process the new max_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
+
+unlock:
+ mutex_unlock(&rps->lock);
+
+ return ret;
+}
+
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return intel_guc_slpc_set_max_freq(slpc, val);
+ else
+ return set_max_freq(rps, val);
+}
+
+u32 intel_rps_get_min_frequency(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return slpc->min_freq_softlimit;
+ else
+ return intel_gpu_freq(rps, rps->min_freq_softlimit);
+}
+
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in raw hw units.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in raw hardware units. On newer platforms these
+ * are units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
+static int set_min_freq(struct intel_rps *rps, u32 val)
+{
+ int ret = 0;
+
+ mutex_lock(&rps->lock);
+
+ val = intel_freq_opcode(rps, val);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val > rps->max_freq_softlimit) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ rps->min_freq_softlimit = val;
+
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ /*
+ * We still need *_set_rps to process the new min_delay and
+ * update the interrupt limits and PMINTRMSK even though
+ * frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
+
+unlock:
+ mutex_unlock(&rps->lock);
+
+ return ret;
+}
+
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+ if (rps_uses_slpc(rps))
+ return intel_guc_slpc_set_min_freq(slpc, val);
+ else
+ return set_min_freq(rps, val);
+}
+
+static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;
+
+ /* Allow punit to process software requests */
+ intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
+}
+
+void intel_rps_raise_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+
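+		/*
+		 * Briefly switch RP to software (manual) control so the punit
+		 * acts on the fixed unslice-ratio request written to
+		 * GEN6_RPNSWREQ, then hand control back.
+		 */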
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((caps.rp0_freq <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->rp0_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
+void intel_rps_lower_unslice(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ mutex_lock(&rps->lock);
+
+ if (rps_uses_slpc(rps)) {
+ /* RP limits have not been initialized yet for SLPC path */
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+
+ intel_rps_set_manual(rps, true);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ,
+ ((caps.min_freq <<
+ GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
+ GEN9_IGNORE_SLICE_RATIO));
+ intel_rps_set_manual(rps, false);
+ } else {
+ intel_rps_set(rps, rps->min_freq);
+ }
+
+ mutex_unlock(&rps->lock);
+}
+
+static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ val = intel_uncore_read(gt->uncore, reg32);
+
+ return val;
+}
+
+bool rps_read_mask_mmio(struct intel_rps *rps,
+ i915_reg_t reg32, u32 mask)
+{
+ return rps_read_mmio(rps, reg32) & mask;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * ips_ping_for_i915_load - tell the intel_ips driver that the i915 driver
+ * is now loaded, if IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ /*
+ * We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values.
+ */
+ if (GRAPHICS_VER(gt->i915) == 5) {
+ GEM_BUG_ON(ips_mchdev);
+ rcu_assign_pointer(ips_mchdev, gt->i915);
+ ips_ping_for_i915_load();
+ }
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+ if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
+ rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = rcu_dereference(ips_mchdev);
+ if (i915 && !kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_ips *ips = &to_gt(i915)->rps.ips;
+
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __ips_chipset_val(ips);
+ graphics_val = __ips_gfx_val(ips);
+ spin_unlock_irq(&mchdev_lock);
+ }
+
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &to_gt(i915)->rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit < rps->max_freq)
+ rps->max_freq_softlimit++;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &to_gt(i915)->rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit > rps->min_freq)
+ rps->max_freq_softlimit--;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *i915;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ ret = to_gt(i915)->awake;
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by clamping the max frequency softlimit to the
+ * minimum and requesting the minimum frequency.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &to_gt(i915)->rps;
+
+ spin_lock_irq(&mchdev_lock);
+ rps->max_freq_softlimit = rps->min_freq;
+ ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#include "selftest_slpc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000..4509dfdc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+#include "i915_reg_defs.h"
+
+struct i915_request;
+
+void intel_rps_init_early(struct intel_rps *rps);
+void intel_rps_init(struct intel_rps *rps);
+void intel_rps_sanitize(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+void intel_rps_dec_waiters(struct intel_rps *rps);
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps);
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
+u32 intel_rps_read_punit_req(struct intel_rps *rps);
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
+void intel_rps_raise_unslice(struct intel_rps *rps);
+void intel_rps_lower_unslice(struct intel_rps *rps);
+
+u32 intel_rps_read_throttle_reason(struct intel_rps *rps);
+bool rps_read_mask_mmio(struct intel_rps *rps, i915_reg_t reg32, u32 mask);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_set_enabled(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_clear_enabled(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline bool intel_rps_is_active(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline void intel_rps_set_active(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_clear_active(struct intel_rps *rps)
+{
+ return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_has_interrupts(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_set_interrupts(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline void intel_rps_clear_interrupts(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
+}
+
+static inline bool intel_rps_uses_timer(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_set_timer(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+static inline void intel_rps_clear_timer(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_TIMER, &rps->flags);
+}
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000..9173ec75f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ u64 last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c, m;
+};
+
+struct intel_rps_ei {
+ ktime_t ktime;
+ u32 render_c0;
+ u32 media_c0;
+};
+
+enum {
+ INTEL_RPS_ENABLED = 0,
+ INTEL_RPS_ACTIVE,
+ INTEL_RPS_INTERRUPTS,
+ INTEL_RPS_TIMER,
+};
+
+/**
+ * struct intel_rps_freq_caps - rps freq capabilities
+ * @rp0_freq: non-overclocked max frequency
+ * @rp1_freq: "less than" RP0 power/frequency
+ * @min_freq: aka RPn, minimum frequency
+ *
+ * Freq caps exposed by HW; the values are in "hw units" and intel_gpu_freq()
+ * should be used to convert to MHz
+ */
+struct intel_rps_freq_caps {
+ u8 rp0_freq;
+ u8 rp1_freq;
+ u8 min_freq;
+};
+
+struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
+ /*
+	 * work and pm_iir are protected by
+	 * gt->irq_lock
+ */
+ struct timer_list timer;
+ struct work_struct work;
+ unsigned long flags;
+
+ ktime_t pm_timestamp;
+ u32 pm_interval;
+ u32 pm_iir;
+
+ /* PM interrupt bits that should never be masked */
+ u32 pm_intrmsk_mbz;
+ u32 pm_events;
+
+	/* Frequencies are stored in potentially platform-dependent multiples.
+	 * In other words, *_freq needs to be converted with intel_gpu_freq() to get MHz.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 last_freq; /* Last SWREQ frequency */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
+ u8 idle_freq; /* Frequency to request when we are idle */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
+
+ int last_adj;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+		u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
+
+ atomic_t num_waiters;
+ unsigned int boosts;
+
+ /* manual wa residency calculations */
+ struct intel_rps_ei ei;
+ struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
new file mode 100644
index 000000000..e8f3d18c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_sa_media.h"
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore;
+
+ uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->gsi_offset = gsi_offset;
+
+ gt->irq_lock = to_gt(i915)->irq_lock;
+ intel_gt_common_init_early(gt);
+ intel_uncore_init_early(uncore, gt);
+
+ /*
+ * Standalone media shares the general MMIO space with the primary
+ * GT. We'll re-use the primary GT's mapping.
+ */
+ uncore->regs = i915->uncore.regs;
+ if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
+ return -EIO;
+
+ gt->uncore = uncore;
+ gt->phys_addr = phys_addr;
+
+ /*
+ * For current platforms we can assume there's only a single
+ * media GT and cache it for quick lookup.
+ */
+ drm_WARN_ON(&i915->drm, i915->media_gt);
+ i915->media_gt = gt;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h
new file mode 100644
index 000000000..3afb310de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __INTEL_SA_MEDIA__
+#define __INTEL_SA_MEDIA__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset);
+
+#endif /* __INTEL_SA_MEDIA__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
new file mode 100644
index 000000000..66f21c735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "intel_engine_regs.h"
+#include "intel_gt_regs.h"
+#include "intel_sseu.h"
+
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice)
+{
+ sseu->max_slices = max_slices;
+ sseu->max_subslices = max_subslices;
+ sseu->max_eus_per_subslice = max_eus_per_subslice;
+}
+
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ unsigned int i, total = 0;
+
+ if (sseu->has_xehp_dss)
+ return bitmap_weight(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask.hsw); i++)
+ total += hweight8(sseu->subslice_mask.hsw[i]);
+
+ return total;
+}
+
+unsigned int
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+ WARN_ON(sseu->has_xehp_dss);
+ if (WARN_ON(slice >= sseu->max_slices))
+ return 0;
+
+ return sseu->subslice_mask.hsw[slice];
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ if (sseu->has_xehp_dss) {
+ WARN_ON(slice > 0);
+ return sseu->eu_mask.xehp[subslice];
+ } else {
+ return sseu->eu_mask.hsw[slice][subslice];
+ }
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
+{
+ GEM_WARN_ON(eu_mask && __fls(eu_mask) >= sseu->max_eus_per_subslice);
+ if (sseu->has_xehp_dss) {
+ GEM_WARN_ON(slice > 0);
+ sseu->eu_mask.xehp[subslice] = eu_mask;
+ } else {
+ sseu->eu_mask.hsw[slice][subslice] = eu_mask;
+ }
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+ int s, ss, total = 0;
+
+ for (s = 0; s < sseu->max_slices; s++)
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (sseu->has_xehp_dss)
+ total += hweight16(sseu->eu_mask.xehp[ss]);
+ else
+ total += hweight16(sseu->eu_mask.hsw[s][ss]);
+
+ return total;
+}
+
+/**
+ * intel_sseu_copy_eumask_to_user - Copy EU mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing EU mask to copy
+ *
+ * Copies the EU mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
+{
+ u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE] = {};
+ int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int len = sseu->max_slices * sseu->max_subslices * eu_stride;
+ int s, ss, i;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int uapi_offset =
+ s * sseu->max_subslices * eu_stride +
+ ss * eu_stride;
+ u16 mask = sseu_get_eus(sseu, s, ss);
+
+ for (i = 0; i < eu_stride; i++)
+ eu_mask[uapi_offset + i] =
+ (mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+ }
+
+ return copy_to_user(to, eu_mask, len);
+}
+
+/**
+ * intel_sseu_copy_ssmask_to_user - Copy subslice mask into a userspace buffer
+ * @to: Pointer to userspace buffer to copy to
+ * @sseu: SSEU structure containing subslice mask to copy
+ *
+ * Copies the subslice mask to a userspace buffer in the format expected by
+ * the query ioctl's topology queries.
+ *
+ * Returns the result of the copy_to_user() operation.
+ */
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu)
+{
+ u8 ss_mask[GEN_SS_MASK_SIZE] = {};
+ int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ int len = sseu->max_slices * ss_stride;
+ int s, ss, i;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ i = s * ss_stride * BITS_PER_BYTE + ss;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ continue;
+
+ ss_mask[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
+ }
+ }
+
+ return copy_to_user(to, ss_mask, len);
+}
+
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u32 ss_en, u16 eu_en)
+{
+ u32 valid_ss_mask = GENMASK(sseu->max_subslices - 1, 0);
+ int ss;
+
+ sseu->slice_mask |= BIT(0);
+ sseu->subslice_mask.hsw[0] = ss_en & valid_ss_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void xehp_compute_sseu_info(struct sseu_dev_info *sseu,
+ u16 eu_en)
+{
+ int ss;
+
+ sseu->slice_mask |= BIT(0);
+
+ bitmap_or(sseu->subslice_mask.xehp,
+ sseu->compute_subslice_mask.xehp,
+ sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask));
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, 0, ss))
+ sseu_set_eus(sseu, 0, ss, eu_en);
+
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void
+xehp_load_dss_mask(struct intel_uncore *uncore,
+ intel_sseu_ss_mask_t *ssmask,
+ int numregs,
+ ...)
+{
+ va_list argp;
+ u32 fuse_val[I915_MAX_SS_FUSE_REGS] = {};
+ int i;
+
+ if (WARN_ON(numregs > I915_MAX_SS_FUSE_REGS))
+ numregs = I915_MAX_SS_FUSE_REGS;
+
+ va_start(argp, numregs);
+ for (i = 0; i < numregs; i++)
+ fuse_val[i] = intel_uncore_read(uncore, va_arg(argp, i915_reg_t));
+ va_end(argp);
+
+ bitmap_from_arr32(ssmask->xehp, fuse_val, numregs * 32);
+}
+
+static void xehp_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int num_compute_regs, num_geometry_regs;
+ int eu;
+
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ num_geometry_regs = 0;
+ num_compute_regs = 2;
+ } else {
+ num_geometry_regs = 1;
+ num_compute_regs = 1;
+ }
+
+ /*
+ * The concept of slice has been removed in Xe_HP. To be compatible
+ * with prior generations, assume a single slice across the entire
+ * device. Then calculate out the DSS for each workload type within
+ * that software slice.
+ */
+ intel_sseu_set_info(sseu, 1,
+ 32 * max(num_geometry_regs, num_compute_regs),
+ HAS_ONE_EU_PER_FUSE_BIT(gt->i915) ? 8 : 16);
+ sseu->has_xehp_dss = 1;
+
+ xehp_load_dss_mask(uncore, &sseu->geometry_subslice_mask,
+ num_geometry_regs,
+ GEN12_GT_GEOMETRY_DSS_ENABLE);
+ xehp_load_dss_mask(uncore, &sseu->compute_subslice_mask,
+ num_compute_regs,
+ GEN12_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
+
+ eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+
+ if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
+ eu_en = eu_en_fuse;
+ else
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ xehp_compute_sseu_info(sseu, eu_en);
+}
+
+static void gen12_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 g_dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ u8 s_en;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ /*
+ * Although gen12 architecture supported multiple slices, TGL, RKL,
+ * DG1, and ADL only had a single slice.
+ */
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
+
+ g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, g_dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
+static void gen11_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 ss_en;
+ u8 eu_en;
+ u8 s_en;
+
+ if (IS_JSL_EHL(gt->i915))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
+
+ /*
+ * Although gen11 architecture supported multiple slices, ICL and
+ * EHL/JSL only had a single slice in practice.
+ */
+ s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+ GEN11_GT_S_ENA_MASK;
+ drm_WARN_ON(&gt->i915->drm, s_en != 0x1);
+
+ ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);
+
+ eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+ GEN11_EU_DIS_MASK);
+
+ gen11_compute_sseu_info(sseu, ss_en, eu_en);
+
+ /* ICL has no power gating restrictions. */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
+static void cherryview_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse;
+
+ fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);
+
+ sseu->slice_mask = BIT(0);
+ intel_sseu_set_info(sseu, 1, 2, 8);
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+ sseu->subslice_mask.hsw[0] |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+ sseu->subslice_mask.hsw[0] |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+	 * CHV is expected to always have a uniform distribution of EUs
+	 * across subslices.
+ */
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 fuse2, eu_disable, subslice_mask;
+ const u8 eu_mask = 0xff;
+ int s, ss;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+
+ /* BXT has a single slice and at most 3 subslices. */
+ intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
+ IS_GEN9_LP(i915) ? 3 : 4, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = (1 << sseu->max_subslices) - 1;
+ subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ sseu->subslice_mask.hsw[s] = subslice_mask;
+
+ eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ int eu_per_ss;
+ u8 eu_disabled_mask;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & eu_mask);
+
+ eu_per_ss = sseu->max_eus_per_subslice -
+ hweight8(eu_disabled_mask);
+
+ /*
+			 * Record which subslice(s) have 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ sseu->subslice_7eu[s] |= BIT(ss);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * SKL+ supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT+ supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ sseu->has_slice_pg =
+ !IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg =
+ IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = sseu->eu_per_subslice > 2;
+
+ if (IS_GEN9_LP(i915)) {
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
+ RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
+
+ sseu->min_eu_in_pool = 0;
+ if (HAS_POOLED_EU(i915)) {
+ if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+ sseu->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(1))
+ sseu->min_eu_in_pool = 6;
+ else
+ sseu->min_eu_in_pool = 9;
+ }
+#undef IS_SS_DISABLED
+ }
+}
+
+static void bdw_sseu_info_init(struct intel_gt *gt)
+{
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ struct intel_uncore *uncore = gt->uncore;
+ int s, ss;
+ u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
+ u32 eu_disable0, eu_disable1, eu_disable2;
+
+ fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ intel_sseu_set_info(sseu, 3, 3, 8);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+ subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
+ eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
+ eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
+ eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
+ eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
+ eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
+ ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
+ (32 - GEN8_EU_DIS0_S1_SHIFT));
+ eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
+ ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
+ (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ if (!(sseu->slice_mask & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ sseu->subslice_mask.hsw[s] = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u8 eu_disabled_mask;
+ u32 n_disabled;
+
+ if (!intel_sseu_has_subslice(sseu, s, ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_disabled_mask =
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & 0xFF);
+
+ n_disabled = hweight8(eu_disabled_mask);
+
+ /*
+ * Record which subslices have 7 EUs.
+ */
+ if (sseu->max_eus_per_subslice - n_disabled == 7)
+ sseu->subslice_7eu[s] |= 1 << ss;
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /*
+ * BDW is expected to always have a uniform distribution of EU across
+ * subslices with the exception that any one EU in any one subslice may
+ * be fused off for die recovery.
+ */
+ sseu->eu_per_subslice =
+ intel_sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
+ 0;
+
+ /*
+ * BDW supports slice power gating on devices with more than
+ * one slice.
+ */
+ sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+static void hsw_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct sseu_dev_info *sseu = &gt->info.sseu;
+ u32 fuse1;
+ u8 subslice_mask = 0;
+ int s, ss;
+
+ /*
+ * There isn't a register to tell us how many slices/subslices. We
+ * work off the PCI-ids here.
+ */
+ switch (INTEL_INFO(i915)->gt) {
+ default:
+ MISSING_CASE(INTEL_INFO(i915)->gt);
+ fallthrough;
+ case 1:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0);
+ break;
+ case 2:
+ sseu->slice_mask = BIT(0);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ case 3:
+ sseu->slice_mask = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
+ break;
+ }
+
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ switch (REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)) {
+ default:
+ MISSING_CASE(REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1));
+ fallthrough;
+ case HSW_F1_EU_DIS_10EUS:
+ sseu->eu_per_subslice = 10;
+ break;
+ case HSW_F1_EU_DIS_8EUS:
+ sseu->eu_per_subslice = 8;
+ break;
+ case HSW_F1_EU_DIS_6EUS:
+ sseu->eu_per_subslice = 6;
+ break;
+ }
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ sseu->subslice_mask.hsw[s] = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* No powergating for you. */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+void intel_sseu_info_init(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ xehp_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 12)
+ gen12_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 11)
+ gen11_sseu_info_init(gt);
+ else if (GRAPHICS_VER(i915) >= 9)
+ gen9_sseu_info_init(gt);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_info_init(gt);
+ else if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_info_init(gt);
+ else if (IS_HASWELL(i915))
+ hsw_sseu_info_init(gt);
+}
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
+ const struct intel_sseu *req_sseu)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
+ bool subslice_pg = sseu->has_subslice_pg;
+ u8 slices, subslices;
+ u32 rpcs = 0;
+
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (GRAPHICS_VER(i915) < 9)
+ return 0;
+
+ /*
+ * If i915/perf is active, we want a stable powergating configuration
+ * on the system. Use the configuration pinned by i915/perf.
+ */
+ if (i915->perf.exclusive_stream)
+ req_sseu = &i915->perf.sseu;
+
+ slices = hweight8(req_sseu->slice_mask);
+ subslices = hweight8(req_sseu->subslice_mask);
+
+ /*
+ * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
+	 * wide and Icelake has up to eight subslices, special programming is
+ * needed in order to correctly enable all subslices.
+ *
+ * According to documentation software must consider the configuration
+ * as 2x4x8 and hardware will translate this to 1x8x8.
+ *
+	 * Furthermore, even though SScount is three bits, the maximum documented
+ * value for it is four. From this some rules/restrictions follow:
+ *
+ * 1.
+ * If enabled subslice count is greater than four, two whole slices must
+ * be enabled instead.
+ *
+ * 2.
+ * When more than one slice is enabled, hardware ignores the subslice
+ * count altogether.
+ *
+	 * From these restrictions it follows that it is not possible to enable
+	 * a subslice count between the SScount maximum of four and the maximum
+	 * number available on a particular SKU. Either all subslices are
+	 * enabled, or a count between one and four on the first slice.
+ */
+ if (GRAPHICS_VER(i915) == 11 &&
+ slices == 1 &&
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask.hsw[0]) / 2)) {
+ GEM_BUG_ON(subslices & 1);
+
+ subslice_pg = false;
+ slices *= 2;
+ }
+
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (sseu->has_slice_pg) {
+ u32 mask, val = slices;
+
+ if (GRAPHICS_VER(i915) >= 11) {
+ mask = GEN11_RPCS_S_CNT_MASK;
+ val <<= GEN11_RPCS_S_CNT_SHIFT;
+ } else {
+ mask = GEN8_RPCS_S_CNT_MASK;
+ val <<= GEN8_RPCS_S_CNT_SHIFT;
+ }
+
+ GEM_BUG_ON(val & ~mask);
+ val &= mask;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
+ }
+
+ if (subslice_pg) {
+ u32 val = subslices;
+
+ val <<= GEN8_RPCS_SS_CNT_SHIFT;
+
+ GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
+ val &= GEN8_RPCS_SS_CNT_MASK;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
+ }
+
+ if (sseu->has_eu_pg) {
+ u32 val;
+
+ val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
+ val &= GEN8_RPCS_EU_MIN_MASK;
+
+ rpcs |= val;
+
+ val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
+ val &= GEN8_RPCS_EU_MAX_MASK;
+
+ rpcs |= val;
+
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
+}
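+
+/*
+ * Worked example for the Icelake special case handled above, assuming a
+ * hypothetical 1x8x8 part (one slice, eight subslices, eight EUs each):
+ *
+ * - Requesting 1 slice and 8 subslices: 8 > min(4, 8 / 2), so the request is
+ *   reprogrammed as 2 slices with the subslice count left unprogrammed;
+ *   hardware translates the 2x4x8 configuration back to 1x8x8.
+ * - Requesting 1 slice and 4 subslices: the count fits in SScount, so
+ *   S_CNT = 1 and SS_CNT = 4 are programmed directly.
+ */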
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ int s;
+
+ if (sseu->has_xehp_dss) {
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+ drm_printf(p, "geometry dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask),
+ sseu->geometry_subslice_mask.xehp);
+ drm_printf(p, "compute dss mask=%*pb\n",
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask),
+ sseu->compute_subslice_mask.xehp);
+ } else {
+ drm_printf(p, "slice total: %u, mask=%04x\n",
+ hweight8(sseu->slice_mask), sseu->slice_mask);
+ drm_printf(p, "subslice total: %u\n",
+ intel_sseu_subslice_total(sseu));
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ s, hweight8(ss_mask), ss_mask);
+ }
+ }
+
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ str_yes_no(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ str_yes_no(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n",
+ str_yes_no(sseu->has_eu_pg));
+}
+
+static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int s, ss;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ u8 ss_mask = sseu->subslice_mask.hsw[s];
+
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+ s, hweight8(ss_mask), ss_mask);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+ drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+ ss, hweight16(enabled_eus), enabled_eus);
+ }
+ }
+}
+
+static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int dss;
+
+ for (dss = 0; dss < sseu->max_subslices; dss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, 0, dss);
+
+ drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
+ str_yes_no(test_bit(dss, sseu->geometry_subslice_mask.xehp)),
+ str_yes_no(test_bit(dss, sseu->compute_subslice_mask.xehp)),
+ hweight16(enabled_eus), enabled_eus);
+ }
+}
+
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ sseu_print_xehp_topology(sseu, p);
+ } else {
+ sseu_print_hsw_topology(sseu, p);
+ }
+}
+
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m)
+{
+ int s;
+
+ if (sseu->has_xehp_dss) {
+ seq_printf(m, " %s Geometry DSS: %u\n", type,
+ bitmap_weight(sseu->geometry_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->geometry_subslice_mask)));
+ seq_printf(m, " %s Compute DSS: %u\n", type,
+ bitmap_weight(sseu->compute_subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->compute_subslice_mask)));
+ } else {
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, hweight8(sseu->subslice_mask.hsw[s]));
+ }
+}
+
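+/*
+ * Worked example for intel_slicemask_from_xehp_dssmask() below, using made-up
+ * fuse values: with dss_per_slice = 4 and a DSS mask of 0x330 (DSS 4, 5, 8
+ * and 9 enabled), group 0 (DSS 0-3) is empty while groups 1 (DSS 4-7) and 2
+ * (DSS 8-11) each contain an enabled DSS, so the returned slice mask is
+ * BIT(1) | BIT(2) = 0x6.
+ */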
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask,
+ int dss_per_slice)
+{
+ intel_sseu_ss_mask_t per_slice_mask = {};
+ unsigned long slice_mask = 0;
+ int i;
+
+ WARN_ON(DIV_ROUND_UP(XEHP_BITMAP_BITS(dss_mask), dss_per_slice) >
+ 8 * sizeof(slice_mask));
+
+ bitmap_fill(per_slice_mask.xehp, dss_per_slice);
+ for (i = 0; !bitmap_empty(dss_mask.xehp, XEHP_BITMAP_BITS(dss_mask)); i++) {
+ if (bitmap_intersects(dss_mask.xehp, per_slice_mask.xehp, dss_per_slice))
+ slice_mask |= BIT(i);
+
+ bitmap_shift_right(dss_mask.xehp, dss_mask.xehp, dss_per_slice,
+ XEHP_BITMAP_BITS(dss_mask));
+ }
+
+ return slice_mask;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
new file mode 100644
index 000000000..d7e8c374f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SSEU_H__
+#define __INTEL_SSEU_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "i915_gem.h"
+
+struct drm_i915_private;
+struct intel_gt;
+struct drm_printer;
+
+/*
+ * Maximum number of slices on older platforms. Slices no longer exist
+ * starting on Xe_HP ("gslices," "cslices," etc. are a different concept and
+ * are not expressed through fusing).
+ */
+#define GEN_MAX_HSW_SLICES 3
+
+/*
+ * Maximum number of subslices that can exist within a HSW-style slice. This
+ * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+ * I915_MAX_SS_FUSE_BITS value below).
+ */
+#define GEN_MAX_SS_PER_HSW_SLICE 8
+
+/*
+ * Maximum number of 32-bit registers used by hardware to express the
+ * enabled/disabled subslices.
+ */
+#define I915_MAX_SS_FUSE_REGS 2
+#define I915_MAX_SS_FUSE_BITS (I915_MAX_SS_FUSE_REGS * 32)
+
+/* Maximum number of EUs that can exist within a subslice or DSS. */
+#define GEN_MAX_EUS_PER_SS 16
+
+#define SSEU_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* The maximum number of bits needed to express each subslice/DSS independently */
+#define GEN_SS_MASK_SIZE SSEU_MAX(I915_MAX_SS_FUSE_BITS, \
+ GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)
+
+#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_SS_MASK_SIZE)
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS_PER_SS)
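+
+/*
+ * With the definitions above these work out to GEN_SS_MASK_SIZE =
+ * max(2 * 32, 3 * 8) = 64 bits, i.e. GEN_MAX_SUBSLICE_STRIDE = 8 bytes of
+ * subslice mask and GEN_MAX_EU_STRIDE = DIV_ROUND_UP(16, 8) = 2 bytes of EU
+ * mask per subslice/DSS.
+ */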
+
+#define GEN_DSS_PER_GSLICE 4
+#define GEN_DSS_PER_CSLICE 8
+#define GEN_DSS_PER_MSLICE 8
+
+#define GEN_MAX_GSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (I915_MAX_SS_FUSE_BITS / GEN_DSS_PER_CSLICE)
+
+typedef union {
+ u8 hsw[GEN_MAX_HSW_SLICES];
+
+ /* Bitmap compatible with linux/bitmap.h; may exceed size of u64 */
+ unsigned long xehp[BITS_TO_LONGS(I915_MAX_SS_FUSE_BITS)];
+} intel_sseu_ss_mask_t;
+
+#define XEHP_BITMAP_BITS(mask) ((int)BITS_PER_TYPE(typeof(mask.xehp)))
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ intel_sseu_ss_mask_t subslice_mask;
+ intel_sseu_ss_mask_t geometry_subslice_mask;
+ intel_sseu_ss_mask_t compute_subslice_mask;
+ union {
+ u16 hsw[GEN_MAX_HSW_SLICES][GEN_MAX_SS_PER_HSW_SLICE];
+ u16 xehp[I915_MAX_SS_FUSE_BITS];
+ } eu_mask;
+
+ u16 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+ /*
+ * For Xe_HP and beyond, the hardware no longer has traditional slices
+ * so we just report the entire DSS pool under a fake "slice 0."
+ */
+ u8 has_xehp_dss:1;
+
+ /* Topology fields */
+ u8 max_slices;
+ u8 max_subslices;
+ u8 max_eus_per_subslice;
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+static inline struct intel_sseu
+intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
+{
+ struct intel_sseu value = {
+ .slice_mask = sseu->slice_mask,
+ .subslice_mask = sseu->subslice_mask.hsw[0],
+ .min_eus_per_subslice = sseu->max_eus_per_subslice,
+ .max_eus_per_subslice = sseu->max_eus_per_subslice,
+ };
+
+ return value;
+}
+
+static inline bool
+intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ if (slice >= sseu->max_slices ||
+ subslice >= sseu->max_subslices)
+ return false;
+
+ if (sseu->has_xehp_dss)
+ return test_bit(subslice, sseu->subslice_mask.xehp);
+ else
+ return sseu->subslice_mask.hsw[slice] & BIT(subslice);
+}
+
+/*
+ * Used to obtain the index of the first DSS. Can start searching from the
+ * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
+ * groupsize and groupnum are non-zero.
+ */
+static inline unsigned int
+intel_sseu_find_first_xehp_dss(const struct sseu_dev_info *sseu, int groupsize,
+ int groupnum)
+{
+ return find_next_bit(sseu->subslice_mask.xehp,
+ XEHP_BITMAP_BITS(sseu->subslice_mask),
+ groupnum * groupsize);
+}
+
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice);
+
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
+
+unsigned int
+intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+intel_sseu_ss_mask_t
+intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
+
+void intel_sseu_info_init(struct intel_gt *gt);
+
+u32 intel_sseu_make_rpcs(struct intel_gt *gt,
+ const struct intel_sseu *req_sseu);
+
+void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
+
+u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask, int dss_per_slice);
+
+int intel_sseu_copy_eumask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+int intel_sseu_copy_ssmask_to_user(void __user *to,
+ const struct sseu_dev_info *sseu);
+
+void intel_sseu_print_ss_info(const char *type,
+ const struct sseu_dev_info *sseu,
+ struct seq_file *m);
+
+#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
new file mode 100644
index 000000000..c2ee5e182
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_regs.h"
+#include "intel_sseu_debugfs.h"
+
+static void cherryview_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 2
+ struct intel_uncore *uncore = gt->uncore;
+ const int ss_max = SS_MAX;
+ u32 sig1[SS_MAX], sig2[SS_MAX];
+ int ss;
+
+ sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
+ sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
+ sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
+ sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask.hsw[0] |= BIT(ss);
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice, eu_cnt);
+ }
+#undef SS_MAX
+}
+
+static void gen11_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 8
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ /*
+		 * FIXME: Valid SS Mask respects the spec and reads
+		 * only valid bits for those registers, excluding reserved,
+		 * although this seems wrong because it would leave many
+		 * subslices without ACK.
+ */
+ s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
+ GEN10_PGCTL_VALID_SS_MASK(s);
+ eu_reg[2 * s] = intel_uncore_read(uncore,
+ GEN10_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] = intel_uncore_read(uncore,
+ GEN10_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+ eu_mask[ss % 2]);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void gen9_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+#define SS_MAX 3
+ struct intel_uncore *uncore = gt->uncore;
+ const struct intel_gt_info *info = &gt->info;
+ u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
+ int s, ss;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
+ eu_reg[2 * s] =
+ intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] =
+ intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ if (IS_GEN9_BC(gt->i915))
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+
+ if (IS_GEN9_LP(gt->i915)) {
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ sseu->subslice_mask.hsw[s] |= BIT(ss);
+ }
+
+ eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
+ eu_cnt = 2 * hweight32(eu_cnt);
+
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+#undef SS_MAX
+}
+
+static void bdw_sseu_device_status(struct intel_gt *gt,
+ struct sseu_dev_info *sseu)
+{
+ const struct intel_gt_info *info = &gt->info;
+ u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
+ int s;
+
+ sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
+
+ if (sseu->slice_mask) {
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
+ sseu->eu_total = sseu->eu_per_subslice *
+ intel_sseu_subslice_total(sseu);
+
+ /* subtract fused off EU(s) from enabled slice(s) */
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
+
+ sseu->eu_total -= hweight8(subslice_7eu);
+ }
+ }
+}
+
+static void i915_print_sseu_info(struct seq_file *m,
+ bool is_available_info,
+ bool has_pooled_eu,
+ const struct sseu_dev_info *sseu)
+{
+ const char *type = is_available_info ? "Available" : "Enabled";
+
+ seq_printf(m, " %s Slice Mask: %04x\n", type,
+ sseu->slice_mask);
+ seq_printf(m, " %s Slice Total: %u\n", type,
+ hweight8(sseu->slice_mask));
+ seq_printf(m, " %s Subslice Total: %u\n", type,
+ intel_sseu_subslice_total(sseu));
+ intel_sseu_print_ss_info(type, sseu, m);
+ seq_printf(m, " %s EU Total: %u\n", type,
+ sseu->eu_total);
+ seq_printf(m, " %s EU Per Subslice: %u\n", type,
+ sseu->eu_per_subslice);
+
+ if (!is_available_info)
+ return;
+
+ seq_printf(m, " Has Pooled EU: %s\n", str_yes_no(has_pooled_eu));
+ if (has_pooled_eu)
+ seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
+
+ seq_printf(m, " Has Slice Power Gating: %s\n",
+ str_yes_no(sseu->has_slice_pg));
+ seq_printf(m, " Has Subslice Power Gating: %s\n",
+ str_yes_no(sseu->has_subslice_pg));
+ seq_printf(m, " Has EU Power Gating: %s\n",
+ str_yes_no(sseu->has_eu_pg));
+}
+
+/*
+ * This is called from top-level debugfs as well, so we can't get the gt from
+ * the seq_file.
+ */
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ const struct intel_gt_info *info = &gt->info;
+ struct sseu_dev_info *sseu;
+ intel_wakeref_t wakeref;
+
+ if (GRAPHICS_VER(i915) < 8)
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
+
+ seq_puts(m, "SSEU Device Status\n");
+
+ sseu = kzalloc(sizeof(*sseu), GFP_KERNEL);
+ if (!sseu)
+ return -ENOMEM;
+
+ intel_sseu_set_info(sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (IS_CHERRYVIEW(i915))
+ cherryview_sseu_device_status(gt, sseu);
+ else if (IS_BROADWELL(i915))
+ bdw_sseu_device_status(gt, sseu);
+ else if (GRAPHICS_VER(i915) == 9)
+ gen9_sseu_device_status(gt, sseu);
+ else if (GRAPHICS_VER(i915) >= 11)
+ gen11_sseu_device_status(gt, sseu);
+ }
+
+ i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu);
+
+ kfree(sseu);
+
+ return 0;
+}
+
+static int sseu_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+
+ return intel_sseu_status(m, gt);
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+
+static int sseu_topology_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_sseu_print_topology(gt->i915, &gt->info.sseu, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology);
+
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "sseu_status", &sseu_status_fops, NULL },
+ { "sseu_topology", &sseu_topology_fops, NULL },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
new file mode 100644
index 000000000..73f001589
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef INTEL_SSEU_DEBUGFS_H
+#define INTEL_SSEU_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct seq_file;
+
+int intel_sseu_status(struct seq_file *m, struct intel_gt *gt);
+void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_SSEU_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
new file mode 100644
index 000000000..b9640212d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2018 Intel Corporation
+ */
+
+#include <drm/drm_cache.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "i915_active.h"
+#include "i915_drv.h"
+#include "i915_syncmap.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+#define TIMELINE_SEQNO_BYTES 8
+
+static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
+}
+
+static void __timeline_retire(struct i915_active *active)
+{
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
+
+ i915_vma_unpin(tl->hwsp_ggtt);
+ intel_timeline_put(tl);
+}
+
+static int __timeline_active(struct i915_active *active)
+{
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
+
+ __i915_vma_pin(tl->hwsp_ggtt);
+ intel_timeline_get(tl);
+ return 0;
+}
+
+I915_SELFTEST_EXPORT int
+intel_timeline_pin_map(struct intel_timeline *timeline)
+{
+ struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
+ u32 ofs = offset_in_page(timeline->hwsp_offset);
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ timeline->hwsp_map = vaddr;
+ timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
+ drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
+
+ return 0;
+}
+
+static int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp,
+ unsigned int offset)
+{
+ kref_init(&timeline->kref);
+ atomic_set(&timeline->pin_count, 0);
+
+ timeline->gt = gt;
+
+ if (hwsp) {
+ timeline->hwsp_offset = offset;
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ } else {
+ timeline->has_initial_breadcrumb = true;
+ hwsp = hwsp_alloc(gt);
+ if (IS_ERR(hwsp))
+ return PTR_ERR(hwsp);
+ timeline->hwsp_ggtt = hwsp;
+ }
+
+ timeline->hwsp_map = NULL;
+ timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;
+
+ GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
+
+ timeline->fence_context = dma_fence_context_alloc(1);
+
+ mutex_init(&timeline->mutex);
+
+ INIT_ACTIVE_FENCE(&timeline->last_request);
+ INIT_LIST_HEAD(&timeline->requests);
+
+ i915_syncmap_init(&timeline->sync);
+ i915_active_init(&timeline->active, __timeline_active,
+ __timeline_retire, 0);
+
+ return 0;
+}
+
+void intel_gt_init_timelines(struct intel_gt *gt)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+
+ spin_lock_init(&timelines->lock);
+ INIT_LIST_HEAD(&timelines->active_list);
+}
+
+static void intel_timeline_fini(struct rcu_head *rcu)
+{
+ struct intel_timeline *timeline =
+ container_of(rcu, struct intel_timeline, rcu);
+
+ if (timeline->hwsp_map)
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+
+ i915_vma_put(timeline->hwsp_ggtt);
+ i915_active_fini(&timeline->active);
+
+ /*
+	 * A small race exists between intel_gt_retire_requests_timeout and
+	 * intel_timeline_exit which could result in the syncmap not getting
+	 * freed. Rather than work too hard to seal this race, simply clean up
+	 * the syncmap on fini.
+ */
+ i915_syncmap_free(&timeline->sync);
+
+ kfree(timeline);
+}
+
+struct intel_timeline *
+__intel_timeline_create(struct intel_gt *gt,
+ struct i915_vma *global_hwsp,
+ unsigned int offset)
+{
+ struct intel_timeline *timeline;
+ int err;
+
+ timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+ if (!timeline)
+ return ERR_PTR(-ENOMEM);
+
+ err = intel_timeline_init(timeline, gt, global_hwsp, offset);
+ if (err) {
+ kfree(timeline);
+ return ERR_PTR(err);
+ }
+
+ return timeline;
+}
+
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
+void __intel_timeline_pin(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_inc(&tl->pin_count);
+}
+
+int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
+{
+ int err;
+
+ if (atomic_add_unless(&tl->pin_count, 1, 0))
+ return 0;
+
+ if (!tl->hwsp_map) {
+ err = intel_timeline_pin_map(tl);
+ if (err)
+ return err;
+ }
+
+ err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
+ if (err)
+ return err;
+
+ tl->hwsp_offset =
+ i915_ggtt_offset(tl->hwsp_ggtt) +
+ offset_in_page(tl->hwsp_offset);
+ GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ tl->fence_context, tl->hwsp_offset);
+
+ i915_active_acquire(&tl->active);
+ if (atomic_fetch_inc(&tl->pin_count)) {
+ i915_active_release(&tl->active);
+ __i915_vma_unpin(tl->hwsp_ggtt);
+ }
+
+ return 0;
+}
+
+void intel_timeline_reset_seqno(const struct intel_timeline *tl)
+{
+ u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
+ /* Must be pinned to be writable, and no requests in flight. */
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+
+ memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
+ WRITE_ONCE(*hwsp_seqno, tl->seqno);
+ drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
+}
+
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ /*
+ * Pretend we are serialised by the timeline->mutex.
+ *
+ * While generally true, there are a few exceptions to the rule
+ * for the engine->kernel_context being used to manage power
+ * transitions. As the engine_park may be called from under any
+ * timeline, it uses the power mutex as a global serialisation
+ * lock to prevent any other request entering its timeline.
+ *
+ * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
+ *
+ * However, intel_gt_retire_request() does not know which engine
+ * it is retiring along and so cannot partake in the engine-pm
+ * barrier, and there we use the tl->active_count as a means to
+ * pin the timeline in the active_list while the locks are dropped.
+ * Ergo, as that is outside of the engine-pm barrier, we need to
+ * use atomic to manipulate tl->active_count.
+ */
+ lockdep_assert_held(&tl->mutex);
+
+ if (atomic_add_unless(&tl->active_count, 1, 0))
+ return;
+
+ spin_lock(&timelines->lock);
+ if (!atomic_fetch_inc(&tl->active_count)) {
+ /*
+ * The HWSP is volatile, and may have been lost while inactive,
+ * e.g. across suspend/resume. Be paranoid, and ensure that
+ * the HWSP value matches our seqno so we don't proclaim
+ * the next request as already complete.
+ */
+ intel_timeline_reset_seqno(tl);
+ list_add_tail(&tl->link, &timelines->active_list);
+ }
+ spin_unlock(&timelines->lock);
+}
+
+void intel_timeline_exit(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ /* See intel_timeline_enter() */
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ if (atomic_add_unless(&tl->active_count, -1, 1))
+ return;
+
+ spin_lock(&timelines->lock);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+ spin_unlock(&timelines->lock);
+
+ /*
+	 * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
+}
+
+static u32 timeline_advance(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
+
+ return tl->seqno += 1 + tl->has_initial_breadcrumb;
+}
+
+static noinline int
+__intel_timeline_get_seqno(struct intel_timeline *tl,
+ u32 *seqno)
+{
+ u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
+
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
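+	/*
+	 * E.g. with TIMELINE_SEQNO_BYTES == 8, a next_ofs of 0x20 has bit 5
+	 * set and is bumped to 0x40 by the check below.
+	 */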
+ if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
+ next_ofs = offset_in_page(next_ofs + BIT(5));
+
+ tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
+ tl->hwsp_seqno = tl->hwsp_map + next_ofs;
+ intel_timeline_reset_seqno(tl);
+
+ *seqno = timeline_advance(tl);
+ GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
+ return 0;
+}
+
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ *seqno = timeline_advance(tl);
+
+ /* Replace the HWSP on wraparound for HW semaphores */
+ if (unlikely(!*seqno && tl->has_initial_breadcrumb))
+ return __intel_timeline_get_seqno(tl, seqno);
+
+ return 0;
+}
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
+{
+ struct intel_timeline *tl;
+ int err;
+
+ rcu_read_lock();
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_signaled(from) ||
+ !i915_active_acquire_if_busy(&tl->active))
+ tl = NULL;
+
+ if (tl) {
+ /* hwsp_offset may wraparound, so use from->hwsp_seqno */
+ *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
+ offset_in_page(from->hwsp_seqno);
+ }
+
+ /* ensure we wait on the right request, if not, we completed */
+ if (tl && __i915_request_is_complete(from)) {
+ i915_active_release(&tl->active);
+ tl = NULL;
+ }
+ rcu_read_unlock();
+
+ if (!tl)
+ return 1;
+
+ /* Can't do semaphore waits on kernel context */
+ if (!tl->has_initial_breadcrumb) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = i915_active_add_request(&tl->active, to);
+
+out:
+ i915_active_release(&tl->active);
+ return err;
+}
+
+void intel_timeline_unpin(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (!atomic_dec_and_test(&tl->pin_count))
+ return;
+
+ i915_active_release(&tl->active);
+ __i915_vma_unpin(tl->hwsp_ggtt);
+}
+
+void __intel_timeline_free(struct kref *kref)
+{
+ struct intel_timeline *timeline =
+ container_of(kref, typeof(*timeline), kref);
+
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
+ GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(timeline->retire);
+
+ call_rcu(&timeline->rcu, intel_timeline_fini);
+}
+
+void intel_gt_fini_timelines(struct intel_gt *gt)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+
+ GEM_BUG_ON(!list_empty(&timelines->active_list));
+}
+
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+ dma_fence_put(fence);
+ }
+ drm_printf(m, " }\n");
+
+ if (show_request) {
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ show_request(m, rq, "", 2);
+ }
+
+ mutex_unlock(&tl->mutex);
+ spin_lock(&timelines->lock);
+
+ /* Resume list iteration after reacquiring spinlock */
+ list_safe_reset_next(tl, tn, link);
+ if (atomic_dec_and_test(&tl->active_count))
+ list_del(&tl->link);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(atomic_read(&tl->active_count));
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "gt/selftests/mock_timeline.c"
+#include "gt/selftest_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
new file mode 100644
index 000000000..57308c4d6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
+
+#include <linux/lockdep.h>
+
+#include "i915_active.h"
+#include "i915_syncmap.h"
+#include "intel_timeline_types.h"
+
+struct drm_printer;
+
+struct intel_timeline *
+__intel_timeline_create(struct intel_gt *gt,
+ struct i915_vma *global_hwsp,
+ unsigned int offset);
+
+static inline struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt)
+{
+ return __intel_timeline_create(gt, NULL, 0);
+}
+
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset);
+
+static inline struct intel_timeline *
+intel_timeline_get(struct intel_timeline *timeline)
+{
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
+void __intel_timeline_free(struct kref *kref);
+static inline void intel_timeline_put(struct intel_timeline *timeline)
+{
+ kref_put(&timeline->kref, __intel_timeline_free);
+}
+
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
+
+void __intel_timeline_pin(struct intel_timeline *tl);
+int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww);
+void intel_timeline_enter(struct intel_timeline *tl);
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
+void intel_timeline_unpin(struct intel_timeline *tl);
+
+void intel_timeline_reset_seqno(const struct intel_timeline *tl);
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
+
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent));
+
+static inline bool
+intel_timeline_is_last(const struct intel_timeline *tl,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->link, &tl->requests);
+}
+
+I915_SELFTEST_DECLARE(int intel_timeline_pin_map(struct intel_timeline *tl));
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
new file mode 100644
index 000000000..74e67dbf8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct i915_vma;
+struct i915_syncmap;
+struct intel_gt;
+
+struct intel_timeline {
+ u64 fence_context;
+ u32 seqno;
+
+ struct mutex mutex; /* protects the flow of requests */
+
+ /*
+ * pin_count and active_count track essentially the same thing:
+ * How many requests are in flight or may be under construction.
+ *
+ * We need two distinct counters so that we can assign different
+ * lifetimes to the events for different use-cases. For example,
+ * we want to permanently keep the timeline pinned for the kernel
+ * context so that we can issue requests at any time without having
+ * to acquire space in the GGTT. However, we want to keep tracking
+ * the activity (to be able to detect when we become idle) along that
+ * permanently pinned timeline and so end up requiring two counters.
+ *
+ * Note that the active_count is protected by the intel_timeline.mutex,
+ * but the pin_count is protected by a combination of serialisation
+ * from the intel_context caller plus internal atomicity.
+ */
+ atomic_t pin_count;
+ atomic_t active_count;
+
+ void *hwsp_map;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ bool has_initial_breadcrumb;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /*
+ * Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_active_fence_get(), or manage the RCU
+ * protection themselves (cf the i915_active_fence API).
+ */
+ struct i915_active_fence last_request;
+
+ struct i915_active active;
+
+ /** A chain of completed timelines ready for early retirement. */
+ struct intel_timeline *retire;
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and can not rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+
+ struct list_head link;
+ struct intel_gt *gt;
+
+ struct list_head engine_link;
+
+ struct kref kref;
+ struct rcu_head rcu;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
new file mode 100644
index 000000000..86621dff8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -0,0 +1,3031 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_regs.h"
+#include "intel_ring.h"
+#include "intel_workarounds.h"
+
+/**
+ * DOC: Hardware workarounds
+ *
+ * This file is intended as a central place to implement most [1]_ of the
+ * required workarounds for hardware to work as originally intended. They fall
+ * in five basic categories depending on how/when they are applied:
+ *
+ * - Workarounds that touch registers that are saved/restored to/from the HW
+ * context image. The list is emitted (via Load Register Immediate commands)
+ *   every time a new context is created.
+ * - GT workarounds. The list of these WAs is applied whenever these registers
+ * revert to default values (on GPU reset, suspend/resume [2]_, etc..).
+ * - Display workarounds. The list is applied during display clock-gating
+ * initialization.
+ * - Workarounds that whitelist a privileged register, so that UMDs can manage
+ *   them directly. This is just a special case of an MMIO workaround (as we
+ *   write the list of these to-be-whitelisted registers to some special HW
+ * registers).
+ * - Workaround batchbuffers, that get executed automatically by the hardware
+ * on every HW context restore.
+ *
+ * .. [1] Please notice that there are other WAs that, due to their nature,
+ * cannot be applied from a central place. Those are peppered around the rest
+ * of the code, as needed.
+ *
+ * .. [2] Technically, some registers are powercontext saved & restored, so they
+ * survive a suspend/resume. In practice, writing them again is not too
+ * costly and simplifies things. We can revisit this in the future.
+ *
+ * Layout
+ * ~~~~~~
+ *
+ * Keep things in this file ordered by WA type, as per the above (context, GT,
+ * display, register whitelist, batchbuffer). Then, inside each type, keep the
+ * following order:
+ *
+ * - Infrastructure functions and macros
+ * - WAs per platform in standard gen/chrono order
+ * - Public functions to init or apply the given workaround type.
+ */
+
+static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
+{
+ wal->name = name;
+ wal->engine_name = engine_name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
+ wal->wa_count, wal->name, wal->engine_name);
+}
+
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list) {
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+ kfree(wal->list);
+ }
+
+ wal->list = list;
+ }
+
+ while (start < end) {
+ unsigned int mid = start + (end - start) / 2;
+
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
+ start = mid + 1;
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
+ end = mid;
+ } else {
+ wa_ = &wal->list[mid];
+
+ if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->clr, wa_->set);
+
+ wa_->set &= ~wa->clr;
+ }
+
+ wal->wa_count++;
+ wa_->set |= wa->set;
+ wa_->clr |= wa->clr;
+ wa_->read |= wa->read;
+ return;
+ }
+ }
+
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
+
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
+ break;
+
+ swap(wa_[1], wa_[0]);
+ }
+}
+
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 clear, u32 set, u32 read_mask, bool masked_reg)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .clr = clear,
+ .set = set,
+ .read = read_mask,
+ .masked_reg = masked_reg,
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
+{
+ wa_add(wal, reg, clear, set, clear, false);
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+{
+ wa_write_clr_set(wal, reg, ~0, set);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+{
+ wa_write_clr_set(wal, reg, set, set);
+}
+
+static void
+wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
+{
+ wa_write_clr_set(wal, reg, clr, 0);
+}
+
+/*
+ * WA operations on "masked registers". A masked register has the upper 16 bits
+ * documented as "masked" in b-spec. Its purpose is to allow writing to just a
+ * portion of the register without a read-modify-write: you simply write the
+ * mask of the bits you are going to modify in the upper 16 bits.
+ *
+ * The wa_masked_* family of functions already does the necessary operations to
+ * calculate the mask based on the parameters passed, so the user only has to
+ * provide the lower 16 bits of that register.
+ */
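+
+/*
+ * As a quick illustration (assuming the usual _MASKED_* helpers used by
+ * wa_masked_en()/wa_masked_dis() below), the values written for a masked
+ * register expand to:
+ *
+ *   _MASKED_BIT_ENABLE(BIT(3))  == (BIT(3) << 16) | BIT(3) == 0x00080008
+ *   _MASKED_BIT_DISABLE(BIT(3)) == (BIT(3) << 16)          == 0x00080000
+ *   _MASKED_FIELD(0x30, 0x10)   == (0x30 << 16) | 0x10     == 0x00300010
+ *
+ * so a single MMIO write both selects and updates the affected bits.
+ */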
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+}
+
+static void
+wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+}
+
+static void
+wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 mask, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+}
+
+static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+}
+
+static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+}
+
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw,chv */
+ wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
+
+ /* WaDisablePartialInstShootdown:bdw,chv */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+ * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:bdw,chv */
+ /* WaHdcDisableFetchWhenMasked:bdw,chv */
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
+
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for BDW and CHV; turn it on.
+ */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw,chv */
+ wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_masked_field_set(wal, GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+}
+
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ gen8_ctx_workarounds_init(engine, wal);
+
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableDopClockGating:bdw
+ *
+ * Also see the related UCGTCL1 write in bdw_init_clock_gating()
+ * to disable EUTC clock gating.
+ */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
+
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ wa_masked_en(wal, HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+}
+
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen8_ctx_workarounds_init(engine, wal);
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+}
+
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (HAS_LLC(i915)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+ }
+
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
+
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
+ wa_masked_en(wal, CACHE_MODE_1,
+ GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
+ wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+	 * This area has been a source of system hangs so we play it safe
+	 * and mimic skl regardless of what the bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
+ wa_masked_en(wal, HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+ /*
+ * Supporting preemption with fine-granularity requires changes in the
+ * batch buffer programming. Since we can't break old userspace, we
+ * need to set our default preemption level to safe value. Userspace is
+ * still able to use more fine-grained preemption levels, since in
+ * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+ * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+ * not real HW workarounds, but merely a way to start using preemption
+ * while maintaining old contract with userspace.
+ */
+
+ /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+ /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+}
+
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
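+		/*
+		 * E.g. if the only 7-EU subslice in this slice is subslice 2
+		 * (subslice_7eu[i] == 0b0100), then ss == 2 and
+		 * vals[i] == 3 - 2 == 1.
+		 */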
+ ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ wa_masked_field_set(wal, GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+}
+
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen9_ctx_workarounds_init(engine, wal);
+ skl_tune_iz_hashing(engine, wal);
+}
+
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen9_ctx_workarounds_init(engine, wal);
+
+ /* WaDisableThreadStallDopClockGating:bxt */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+}
+
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ gen9_ctx_workarounds_init(engine, wal);
+
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+}
+
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen9_ctx_workarounds_init(engine, wal);
+
+ /* WaToEnableHwFixForPushConstHWBug:glk */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+}
+
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen9_ctx_workarounds_init(engine, wal);
+
+ /* WaToEnableHwFixForPushConstHWBug:cfl */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableSbeCacheDispatchPortSharing:cfl */
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+}
+
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /* Wa_1406697149 (WaDisableBankHangMode:icl) */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
+
+ /* WaForceEnableNonCoherent:icl
+ * This is not the same workaround as in early Gen9 platforms, where
+ * lacking this could cause system hangs, but coherency performance
+ * overhead is high and only a few compute workloads really need it
+ * (the register is whitelisted in hardware now, so UMDs can opt in
+ * for coherency if they have a good reason).
+ */
+ wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+
+ /* WaEnableFloatBlendOptimization:icl */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
+ 0 /* write-only, so skip validation */,
+ true);
+
+ /* WaDisableGPGPUMidThreadPreemption:icl */
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+
+ /* allow headerless messages for preemptible GPGPU context */
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
+}
+
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on dg2 platform.
+ */
+static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
+ wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0, false);
+}
+
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on several platforms.
+ */
+static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * Although some platforms refer to it as Wa_1604555607, we need to
+ * program it even on those that don't explicitly list that
+ * workaround.
+ *
+ * Note that the programming of this register is further modified
+ * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
+ * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
+ * value when read. The default value for this register is zero for all
+ * fields and there are no bit masks. So instead of doing a RMW we
+ * should just write TDS timer value. For the same reason read
+ * verification is ignored.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0, false);
+}
+
+static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen12_ctx_gt_tuning_init(engine, wal);
+
+ /*
+ * Wa_1409142259:tgl,dg1,adl-p
+ * Wa_1409347922:tgl,dg1,adl-p
+ * Wa_1409252684:tgl,dg1,adl-p
+ * Wa_1409217633:tgl,dg1,adl-p
+ * Wa_1409207793:tgl,dg1,adl-p
+ * Wa_1409178076:tgl,dg1,adl-p
+ * Wa_1408979724:tgl,dg1,adl-p
+ * Wa_14010443199:tgl,rkl,dg1,adl-p
+ * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
+ * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
+ */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+
+ /* WaDisableGPGPUMidThreadPreemption:gen12 */
+ wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+
+ /*
+ * Wa_16011163337
+ *
+ * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
+ * to Wa_1608008084.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224,
+ 0, false);
+}
+
+static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen12_ctx_workarounds_init(engine, wal);
+
+ /* Wa_1409044764 */
+ wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
+
+ /* Wa_22010493298 */
+ wa_masked_en(wal, HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
+}
+
+static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+ /* Wa_16011186671:dg2_g11 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
+ wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010469329:dg2_g10 */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
+
+ /*
+ * Wa_22010465075:dg2_g10
+ * Wa_22010613112:dg2_g10
+ * Wa_14010698770:dg2_g10
+ */
+ wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ }
+
+ /* Wa_16013271637:dg2 */
+ wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_14014947963:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+ wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
+
+ /* Wa_15010599737:dg2 */
+ wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+}
+
+static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * This is a "fake" workaround defined by software to ensure we
+ * maintain reliable, backward-compatible behavior for userspace with
+ * regards to how nested MI_BATCH_BUFFER_START commands are handled.
+ *
+ * The per-context setting of MI_MODE[12] determines whether the bits
+ * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
+ * in the traditional manner or whether they should instead use a new
+ * tgl+ meaning that breaks backward compatibility, but allows nesting
+ * into 3rd-level batchbuffers. When this new capability was first
+ * added in TGL, it remained off by default unless a context
+ * intentionally opted in to the new behavior. However Xe_HPG now
+ * flips this on by default and requires that we explicitly opt out if
+ * we don't want the new behavior.
+ *
+ * From a SW perspective, we want to maintain the backward-compatible
+ * behavior for userspace, so we'll apply a fake workaround to set it
+ * back to the legacy behavior on platforms where the hardware default
+ * is to break compatibility. At the moment there is no Linux
+	 * userspace that utilizes third-level batchbuffers, so keeping the
+	 * legacy meaning is the correct thing to do and avoids userspace
+	 * needing to make any changes.  If/when we have userspace
+ * consumers that want to utilize third-level batch nesting, we can
+ * provide a context parameter to allow them to opt-in.
+ */
+ wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
+}
+
+static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+	 * Some blitter commands do not have a field for MOCS; those
+	 * commands will use the MOCS index pointed to by BLIT_CCTL.
+	 * The BLIT_CCTL registers need to be programmed to un-cached.
+ */
+ if (engine->class == COPY_ENGINE_CLASS) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_write_clr_set(wal,
+ BLIT_CCTL(engine->mmio_base),
+ BLIT_CCTL_MASK,
+ BLIT_CCTL_MOCS(mocs, mocs));
+ }
+}
+
+/*
+ * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
+ * defined by the hardware team, but rather general context registers.
+ * Adding this context register programming to the context workaround
+ * list allows us to use the wa framework for proper application and
+ * validation.
+ */
+static void
+gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ fakewa_disable_nestedbb_mode(engine, wal);
+
+ gen12_ctx_gt_mocs_init(engine, wal);
+}
+
+static void
+__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal,
+ const char *name)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ wa_init_start(wal, name, engine->name);
+
+ /* Applies to all engines */
+ /*
+	 * Fake workarounds are not actual workarounds, but programming
+	 * of context registers using the workaround framework.
+ */
+ if (GRAPHICS_VER(i915) >= 12)
+ gen12_ctx_gt_fake_wa_init(engine, wal);
+
+ if (engine->class != RENDER_CLASS)
+ goto done;
+
+ if (IS_PONTEVECCHIO(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG2(i915))
+ dg2_ctx_workarounds_init(engine, wal);
+ else if (IS_XEHPSDV(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG1(i915))
+ dg1_ctx_workarounds_init(engine, wal);
+ else if (GRAPHICS_VER(i915) == 12)
+ gen12_ctx_workarounds_init(engine, wal);
+ else if (GRAPHICS_VER(i915) == 11)
+ icl_ctx_workarounds_init(engine, wal);
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
+ cfl_ctx_workarounds_init(engine, wal);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine, wal);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine, wal);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine, wal);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine, wal);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine, wal);
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine, wal);
+ else if (GRAPHICS_VER(i915) == 7)
+ gen7_ctx_workarounds_init(engine, wal);
+ else if (GRAPHICS_VER(i915) == 6)
+ gen6_ctx_workarounds_init(engine, wal);
+ else if (GRAPHICS_VER(i915) < 8)
+ ;
+ else
+ MISSING_CASE(GRAPHICS_VER(i915));
+
+done:
+ wa_init_finish(wal);
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+{
+ __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
+}
+
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
+{
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
+ u32 *cs;
+ int ret;
+
+ if (wal->count == 0)
+ return 0;
+
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
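+	/*
+	 * Ring space: one MI_LOAD_REGISTER_IMM header dword, an
+	 * (offset, value) dword pair per workaround, and a trailing
+	 * MI_NOOP that pads the total dword count to an even number.
+	 */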
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->set;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+gen4_gt_workarounds_init(struct intel_gt *gt,
+ struct i915_wa_list *wal)
+{
+ /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+}
+
+static void
+g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ gen4_gt_workarounds_init(gt, wal);
+
+ /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
+ wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
+}
+
+static void
+ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ g4x_gt_workarounds_init(gt, wal);
+
+ wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void
+snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+}
+
+static void
+ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+ wa_masked_dis(wal,
+ GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode:ivb */
+ wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* WaForceL3Serialization:ivb */
+ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+}
+
+static void
+vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ /* WaForceL3Serialization:vlv */
+ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /*
+ * WaIncreaseL3CreditsForVLVB0:vlv
+ * This is the hardware default actually.
+ */
+ wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+}
+
+static void
+hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ /* L3 caching of data atomics doesn't work -- disable it. */
+ wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+
+ wa_add(wal,
+ HSW_ROW_CHICKEN3, 0,
+ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ 0 /* XXX does this reg exist? */, true);
+
+ /* WaVSRefCountFullforceMissDisable:hsw */
+ wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
+}
+
+static void
+gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
+ unsigned int slice, subslice;
+ u32 mcr, mcr_mask;
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
+
+ /*
+ * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
+ * Before any MMIO read into slice/subslice specific registers, MCR
+ * packet control register needs to be programmed to point to any
+ * enabled s/ss pair. Otherwise, incorrect values will be returned.
+	 * This means each subsequent MMIO read will be forwarded to a
+	 * specific s/ss combination, but this is OK since these registers
+	 * are consistent across s/ss in almost all cases. On the rare
+	 * occasions, such as INSTDONE, where the value depends on the
+	 * s/ss combo, the read should be done with read_subslice_reg.
+ */
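+	/*
+	 * E.g. a slice_mask of 0b1 together with a slice-0 subslice mask
+	 * of 0b1110 yields slice == 0 and subslice == 1, i.e. the lowest
+	 * enabled s/ss pair.
+	 */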
+ slice = ffs(sseu->slice_mask) - 1;
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
+ subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
+ GEM_BUG_ON(!subslice);
+ subslice--;
+
+ /*
+ * We use GEN8_MCR..() macros to calculate the |mcr| value for
+ * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
+ */
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+
+ drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
+
+ wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+}
+
+static void
+gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
+ gen9_wa_init_mcr(i915, wal);
+
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
+
+ if (HAS_LLC(i915)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
+ }
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
+}
+
+static void
+skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ gen9_gt_workarounds_init(gt, wal);
+
+ /* WaDisableGafsUnitClkGating:skl */
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:skl */
+ if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void
+kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ gen9_gt_workarounds_init(gt, wal);
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:kbl */
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void
+glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ gen9_gt_workarounds_init(gt, wal);
+}
+
+static void
+cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ gen9_gt_workarounds_init(gt, wal);
+
+ /* WaDisableGafsUnitClkGating:cfl */
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:cfl */
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void __set_mcr_steering(struct i915_wa_list *wal,
+ i915_reg_t steering_reg,
+ unsigned int slice, unsigned int subslice)
+{
+ u32 mcr, mcr_mask;
+
+ mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+
+ wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
+}
+
+static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
+ unsigned int slice, unsigned int subslice)
+{
+ struct drm_printer p = drm_debug_printer("MCR Steering:");
+
+ __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
+
+ gt->default_steering.groupid = slice;
+ gt->default_steering.instanceid = subslice;
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ intel_gt_mcr_report_steering(&p, gt, false);
+}
+
+static void
+icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
+ unsigned int subslice;
+
+ GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
+ GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
+
+ /*
+ * Although a platform may have subslices, we need to always steer
+ * reads to the lowest instance that isn't fused off. When Render
+ * Power Gating is enabled, grabbing forcewake will only power up a
+ * single subslice (the "minconfig") if there isn't a real workload
+ * that needs to be run; this means that if we steer register reads to
+ * one of the higher subslices, we run the risk of reading back 0's or
+ * random garbage.
+ */
+ subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
+
+ /*
+ * If the subslice we picked above also steers us to a valid L3 bank,
+ * then we can just rely on the default steering and won't need to
+ * worry about explicitly re-steering L3BANK reads later.
+ */
+ if (gt->info.l3bank_mask & BIT(subslice))
+ gt->steering_table[L3BANK] = NULL;
+
+ __add_mcr_wa(gt, wal, 0, subslice);
+}
+
+static void
+xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
+ unsigned long slice, subslice = 0, slice_mask = 0;
+ u32 lncf_mask = 0;
+ int i;
+
+ /*
+ * On Xe_HP the steering increases in complexity. There are now several
+ * more units that require steering and we're not guaranteed to be able
+ * to find a common setting for all of them. These are:
+ * - GSLICE (fusable)
+ * - DSS (sub-unit within gslice; fusable)
+ * - L3 Bank (fusable)
+ * - MSLICE (fusable)
+ * - LNCF (sub-unit within mslice; always present if mslice is present)
+ *
+ * We'll do our default/implicit steering based on GSLICE (in the
+ * sliceid field) and DSS (in the subsliceid field). If we can
+ * find overlap between the valid MSLICE and/or LNCF values with
+ * a suitable GSLICE, then we can just re-use the default value and
+	 * skip any explicit steering at runtime.
+ *
+ * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
+ * a valid sliceid value. DSS steering is the only type of steering
+ * that utilizes the 'subsliceid' bits.
+ *
+ * Also note that, even though the steering domain is called "GSlice"
+ * and it is encoded in the register using the gslice format, the spec
+ * says that the combined (geometry | compute) fuse should be used to
+ * select the steering.
+ */
+
+ /* Find the potential gslice candidates */
+ slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
+ GEN_DSS_PER_GSLICE);
+
+ /*
+	 * Find the potential LNCF candidates. Either of the two LNCFs
+	 * within a valid mslice is fine.
+ */
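+	/*
+	 * Each mslice contains two LNCFs, so mslice i contributes sliceid
+	 * candidates 2*i and 2*i + 1 (e.g. mslice_mask == 0b101 yields
+	 * lncf_mask == 0b110011).
+	 */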
+ for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
+ lncf_mask |= (0x3 << (i * 2));
+
+ /*
+ * Are there any sliceid values that work for both GSLICE and LNCF
+ * steering?
+ */
+ if (slice_mask & lncf_mask) {
+ slice_mask &= lncf_mask;
+ gt->steering_table[LNCF] = NULL;
+ }
+
+ /* How about sliceid values that also work for MSLICE steering? */
+ if (slice_mask & gt->info.mslice_mask) {
+ slice_mask &= gt->info.mslice_mask;
+ gt->steering_table[MSLICE] = NULL;
+ }
+
+ slice = __ffs(slice_mask);
+ subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
+ GEN_DSS_PER_GSLICE;
+
+ __add_mcr_wa(gt, wal, slice, subslice);
+
+ /*
+ * SQIDI ranges are special because they use different steering
+ * registers than everything else we work with. On XeHP SDV and
+ * DG2-G10, any value in the steering registers will work fine since
+ * all instances are present, but DG2-G11 only has SQIDI instances at
+	 * IDs 2 and 3, so we need to steer to one of those. For simplicity
+ * we'll just steer to a hardcoded "2" since that value will work
+ * everywhere.
+ */
+ __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
+ __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
+}
+
+static void
+pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ unsigned int dss;
+
+ /*
+ * Setup implicit steering for COMPUTE and DSS ranges to the first
+ * non-fused-off DSS. All other types of MCR registers will be
+ * explicitly steered.
+ */
+ dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
+ __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
+}
+
+static void
+icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ icl_wa_init_mcr(gt, wal);
+
+ /* WaModifyGamTlbPartitioning:icl */
+ wa_write_clr_set(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+
+ /* Wa_1405766107:icl
+ * Formerly known as WaCL2SFHalfMaxAlloc
+ */
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+
+ /* Wa_220166154:icl
+ * Formerly known as WaDisCtxReload
+ */
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+
+ /* Wa_1406463099:icl
+ * Formerly known as WaGamTlbPendError
+ */
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /* Wa_1607087056:icl,ehl,jsl */
+ if (IS_ICELAKE(i915) ||
+ IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /*
+ * This is not a documented workaround, but rather an optimization
+ * to reduce sampler power.
+ */
+ wa_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
+}
+
+/*
+ * Though there are per-engine instances of these registers,
+ * they retain their value through engine resets and should
+ * only be provided on the GT workaround list rather than
+ * the engine-specific workaround list.
+ */
+static void
+wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
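+	/*
+	 * Only the even instance of each VDBOX pair is programmed here;
+	 * the odd instances are assumed to be covered by their partner.
+	 */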
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ (engine->instance % 2))
+ continue;
+
+ wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
+ IECPUNIT_CLKGATE_DIS);
+ }
+}
+
+static void
+gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ icl_wa_init_mcr(gt, wal);
+
+ /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
+ wa_14011060649(gt, wal);
+
+ /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
+ wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
+}
+
+static void
+tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
+
+ /* Wa_1409420604:tgl */
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+	/* Wa_1607087056:tgl, also known as BUG:1409180338 */
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1408615072:tgl[a0] */
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+}
+
+static void
+dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
+
+ /* Wa_1607087056:dg1 */
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+ /* Wa_1409420604:dg1 */
+ if (IS_DG1(i915))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+ /* Wa_1408615072:dg1 */
+ /* Empirical testing shows this register is unaffected by engine reset. */
+ if (IS_DG1(i915))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+}
+
+static void
+xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ xehp_init_mcr(gt, wal);
+
+ /* Wa_1409757795:xehpsdv */
+ wa_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+
+ /* Wa_16011155590:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ TSGUNIT_CLKGATE_DIS);
+
+ /* Wa_14011780169:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+ }
+
+ /* Wa_16012725990:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
+
+ /* Wa_14011060649:xehpsdv */
+ wa_14011060649(gt, wal);
+}
+
+static void
+dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
+ xehp_init_mcr(gt, wal);
+
+ /* Wa_14011060649:dg2 */
+ wa_14011060649(gt, wal);
+
+ /*
+ * Although there are per-engine instances of these registers,
+ * they technically exist outside the engine itself and are not
+ * impacted by engine resets. Furthermore, they're part of the
+ * GuC blacklist so trying to treat them as engine workarounds
+ * will result in GuC initialization failure and a wedged GPU.
+ */
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS)
+ continue;
+
+ /* Wa_16010515920:dg2_g10 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
+ ALNUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_G10(gt->i915)) {
+ /* Wa_22010523718:dg2 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ CG3DDISCFEG_CLKGATE_DIS);
+
+ /* Wa_14011006942:dg2 */
+ wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE,
+ DSS_ROUTER_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010948348:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
+
+ /* Wa_14011037102:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
+
+ /* Wa_14011371254:dg2_g10 */
+ wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
+
+ /* Wa_14011431319:dg2_g10 */
+ wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS |
+ GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS |
+ GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS |
+ GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS |
+ GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS |
+ GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS);
+ wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+ GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS |
+ GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS |
+ GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS |
+ GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS |
+ GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS |
+ GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS |
+ GAMTLBVEBOX0_CLKGATE_DIS);
+
+ /* Wa_14010569222:dg2_g10 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ GAMEDIA_CLKGATE_DIS);
+
+ /* Wa_14011028019:dg2_g10 */
+ wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+ }
+
+ /* Wa_14014830051:dg2 */
+ wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /*
+ * The following are not actually "workarounds" but rather
+ * recommended tuning settings documented in the bspec's
+ * performance guide section.
+ */
+ wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+}
+
+static void
+pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ pvc_init_mcr(gt, wal);
+
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+}
+
+static void
+gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_PONTEVECCHIO(i915))
+ pvc_gt_workarounds_init(gt, wal);
+ else if (IS_DG2(i915))
+ dg2_gt_workarounds_init(gt, wal);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_gt_workarounds_init(gt, wal);
+ else if (IS_DG1(i915))
+ dg1_gt_workarounds_init(gt, wal);
+ else if (IS_TIGERLAKE(i915))
+ tgl_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) == 12)
+ gen12_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) == 11)
+ icl_gt_workarounds_init(gt, wal);
+ else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
+ cfl_gt_workarounds_init(gt, wal);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(gt, wal);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(gt, wal);
+ else if (IS_BROXTON(i915))
+ gen9_gt_workarounds_init(gt, wal);
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(gt, wal);
+ else if (IS_HASWELL(i915))
+ hsw_gt_workarounds_init(gt, wal);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_gt_workarounds_init(gt, wal);
+ else if (IS_IVYBRIDGE(i915))
+ ivb_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) == 6)
+ snb_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) == 5)
+ ilk_gt_workarounds_init(gt, wal);
+ else if (IS_G4X(i915))
+ g4x_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) == 4)
+ gen4_gt_workarounds_init(gt, wal);
+ else if (GRAPHICS_VER(i915) <= 8)
+ ;
+ else
+ MISSING_CASE(GRAPHICS_VER(i915));
+}
+
+void intel_gt_init_workarounds(struct intel_gt *gt)
+{
+ struct i915_wa_list *wal = &gt->wa_list;
+
+ wa_init_start(wal, "GT", "global");
+ gt_init_workarounds(gt, wal);
+ wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
+{
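+	/*
+	 * Accumulate the union of forcewake domains needed to read and
+	 * write every register in the list.
+	 */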
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(uncore,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
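+	/*
+	 * Only bits covered by wa->read are compared; bits the workaround
+	 * does not expect to read back are ignored.
+	 */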
+ if ((cur ^ wa->set) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg),
+ cur, cur & wa->read, wa->set & wa->read);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void
+wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
+ return;
+
+ fw = wal_get_fw_for_rmw(uncore, wal);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val, old = 0;
+
+ /* open-coded rmw due to steering */
+ old = wa->clr ? intel_gt_mcr_read_any_fw(gt, wa->reg) : 0;
+ val = (old & ~wa->clr) | wa->set;
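+		/*
+		 * Pure writes (no clr mask) are always issued;
+		 * read-modify-writes are skipped when they would be a no-op.
+		 */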
+ if (val != old || !wa->clr)
+ intel_uncore_write_fw(uncore, wa->reg, val);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ wa_verify(wa, intel_gt_mcr_read_any_fw(gt, wa->reg),
+ wal->name, "application");
+ }
+
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+}
+
+void intel_gt_apply_workarounds(struct intel_gt *gt)
+{
+ wa_list_apply(gt, &gt->wa_list);
+}
+
+static bool wa_list_verify(struct intel_gt *gt,
+ const struct i915_wa_list *wal,
+ const char *from)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ struct i915_wa *wa;
+ enum forcewake_domains fw;
+ unsigned long flags;
+ unsigned int i;
+ bool ok = true;
+
+ fw = wal_get_fw_for_rmw(uncore, wal);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa,
+ intel_gt_mcr_read_any_fw(gt, wa->reg),
+ wal->name, from);
+
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
+ return ok;
+}
+
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
+{
+ return wa_list_verify(gt, &gt->wa_list, from);
+}
+
+__maybe_unused
+static bool is_nonpriv_flags_valid(u32 flags)
+{
+ /* Check only valid flag bits are set */
+ if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
+ return false;
+
+ /* NB: Only 3 out of 4 enum values are valid for access field */
+ if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
+ return false;
+
+ return true;
+}
+
+static void
+whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
+{
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
+ return;
+
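+	/*
+	 * The access/range flags occupy bits of the value that are not used
+	 * by the register offset itself, so they can simply be OR'ed in here
+	 * and written verbatim into a RING_FORCE_TO_NONPRIV slot later (see
+	 * intel_engine_apply_whitelist()).
+	 */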
+ wa.reg.reg |= flags;
+ _wa_add(wal, &wa);
+}
+
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
+{
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+ whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+ whitelist_reg(w, GEN8_CS_CHICKEN1);
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
+ whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+ /* WaSendPushConstantsFromMMIO:skl,bxt */
+ whitelist_reg(w, COMMON_SLICE_CHICKEN2);
+}
+
+static void skl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+}
+
+static void bxt_whitelist_build(struct intel_engine_cs *engine)
+{
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(&engine->whitelist);
+}
+
+static void kbl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+}
+
+static void glk_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(w);
+
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+}
+
+static void cfl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(w);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+ *
+	 * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+}
+
+static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ whitelist_reg_ext(w,
+ RING_CTX_TIMESTAMP(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+}
+
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+
+ cfl_whitelist_build(engine);
+}
+
+static void icl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+ *
+	 * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
+
+ case VIDEO_DECODE_CLASS:
+ /* hucStatusRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ /* hucUKernelHdrInfoRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ /* hucStatus2RegOffset */
+ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void tgl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
+ *
+	 * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ /*
+ * Wa_1808121037:tgl
+ * Wa_14012131227:dg1
+ * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
+ */
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dg1_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ tgl_whitelist_build(engine);
+
+ /* GEN:BUG:1409280441:dg1 */
+ if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
+ (engine->class == RENDER_CLASS ||
+ engine->class == COPY_ENGINE_CLASS))
+ whitelist_reg_ext(w, RING_ID(engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
+}
+
+static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+}
+
+static void dg2_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * Wa_1507100340:dg2_g10
+ *
+	 * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ break;
+ case COMPUTE_CLASS:
+ /* Wa_16011157294:dg2_g10 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+ whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
+ break;
+ default:
+ break;
+ }
+}
+
+static void blacklist_trtt(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ /*
+ * Prevent read/write access to [0x4400, 0x4600) which covers
+ * the TRTT range across all engines. Note that normally userspace
+ * cannot access the other engines' trtt control, but for simplicity
+ * we cover the entire range on each engine.
+ */
+ whitelist_reg_ext(w, _MMIO(0x4400),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+ whitelist_reg_ext(w, _MMIO(0x4500),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64);
+}
+
+static void pvc_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+
+ /* Wa_16014440446:pvc */
+ blacklist_trtt(engine);
+}
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
+
+ wa_init_start(w, "whitelist", engine->name);
+
+ if (IS_PONTEVECCHIO(i915))
+ pvc_whitelist_build(engine);
+ else if (IS_DG2(i915))
+ dg2_whitelist_build(engine);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_whitelist_build(engine);
+ else if (IS_DG1(i915))
+ dg1_whitelist_build(engine);
+ else if (GRAPHICS_VER(i915) == 12)
+ tgl_whitelist_build(engine);
+ else if (GRAPHICS_VER(i915) == 11)
+ icl_whitelist_build(engine);
+ else if (IS_COMETLAKE(i915))
+ cml_whitelist_build(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_whitelist_build(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_whitelist_build(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_whitelist_build(engine);
+ else if (IS_BROXTON(i915))
+ bxt_whitelist_build(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_whitelist_build(engine);
+ else if (GRAPHICS_VER(i915) <= 8)
+ ;
+ else
+ MISSING_CASE(GRAPHICS_VER(i915));
+
+ wa_init_finish(w);
+}
+
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
+{
+ const struct i915_wa_list *wal = &engine->whitelist;
+ struct intel_uncore *uncore = engine->uncore;
+ const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
+ return;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
+
+ /* And clear the rest just in case of garbage */
+ for (; i < RING_MAX_NONPRIV_SLOTS; i++)
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
+}
+
+/*
+ * engine_fake_wa_init() is a placeholder to program registers that
+ * are not part of an official workaround defined by the hardware
+ * team.
+ * Adding the programming of those registers to the workaround list
+ * allows us to utilize the wa framework for proper application and
+ * verification.
+ */
+static void
+engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ u8 mocs_w, mocs_r;
+
+ /*
+ * RING_CMD_CCTL specifies the default MOCS entry that will be used
+ * by the command streamer when executing commands that don't have
+ * a way to explicitly specify a MOCS setting. The default should
+ * usually reference whichever MOCS entry corresponds to uncached
+ * behavior, although use of a WB cached entry is recommended by the
+ * spec in certain circumstances on specific platforms.
+ */
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ mocs_r = engine->gt->mocs.uc_index;
+ mocs_w = engine->gt->mocs.uc_index;
+
+ if (HAS_L3_CCS_READ(engine->i915) &&
+ engine->class == COMPUTE_CLASS) {
+ mocs_r = engine->gt->mocs.wb_index;
+
+ /*
+ * Even on the few platforms where MOCS 0 is a
+ * legitimate table entry, it's never the correct
+ * setting to use here; we can assume the MOCS init
+ * just forgot to initialize wb_index.
+ */
+ drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
+ }
+
+ wa_masked_field_set(wal,
+ RING_CMD_CCTL(engine->mmio_base),
+ CMD_CCTL_MOCS_MASK,
+ CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
+ }
+}
+
+static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+{
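+	/*
+	 * True when the first enabled DSS lies beyond gslice 0, i.e. the
+	 * first gslice is entirely fused off.
+	 */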
+ return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
+ GEN_DSS_PER_GSLICE;
+}
+
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (IS_DG2(i915)) {
+ /* Wa_1509235366:dg2 */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+
+ /* Wa_16011620976:dg2_g11 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_1509727124:dg2 */
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ SC_DISABLE_POWER_OPTIMIZATION_EBB);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012419201:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ /* Wa_1308578152:dg2_g10 when first gslice is fused off */
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
+ needs_wa_1308578152(engine)) {
+ wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+ GEN12_REPLAY_MODE_GRANULARITY);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_22013037850:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+
+ /* Wa_22012856258:dg2 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+
+ /*
+ * Wa_22010960976:dg2
+ * Wa_14013347512:dg2
+ */
+ wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1608949956:dg2_g10
+ * Wa_14010198302:dg2_g10
+ */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+		 * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+ * so ignoring verification.
+ */
+ wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ 0, false);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_22010430635:dg2 */
+ wa_masked_en(wal,
+ GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_GRF_CLEAR);
+
+ /* Wa_14010648519:dg2 */
+ wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
+ }
+
+ /* Wa_14013202645:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
+ wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
+
+ /* Wa_22012532006:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+ wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /* Wa_14010680813:dg2_g10 */
+ wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G10(i915)) {
+ /* Wa_22014600077:dg2 */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
+		       0 /* Wa_14012342262: write-only reg, so skip verification */,
+ true);
+ }
+
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1607138336:tgl[a0],dg1[a0]
+ * Wa_1607063988:tgl[a0],dg1[a0]
+ */
+ wa_write_or(wal,
+ GEN9_CTX_PREEMPT_REG,
+ GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
+ }
+
+ if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1606679103:tgl
+ * (see also Wa_1606682166:icl)
+ */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /*
+ * Wa_1407928979:tgl A*
+ * Wa_18011464164:tgl[B0+],dg1[B0+]
+ * Wa_22010931296:tgl[B0+],dg1[B0+]
+ * Wa_14010919138:rkl,dg1,adl-s,adl-p
+ */
+ wa_write_or(wal, GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+ }
+
+ if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
+ IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /*
+ * Wa_1606700617:tgl,dg1,adl-p
+ * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
+ * Wa_14010826681:tgl,dg1,rkl,adl-p
+ * Wa_18019627453:dg2
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+ }
+
+ if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
+ IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+ }
+
+ if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl,rkl,dg1[a0],adlp
+ *
+ * On TGL and RKL there are multiple entries for this WA in the
+ * BSpec; some indicate this is an A0-only WA, others indicate
+ * it applies to all steppings so we trust the "all steppings."
+ * For DG1 this only applies to A0.
+ */
+ wa_masked_en(wal,
+ RING_PSMI_CTL(RENDER_RING_BASE),
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+ }
+
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
+ /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
+ }
+
+ if (GRAPHICS_VER(i915) == 11) {
+		/* This is not a Wa.  Enable for better image quality. */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_clr_set(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_clr_set(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* Wa_1606682166:icl */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1409178092:icl */
+ wa_write_clr_set(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /* Wa_22010271021 */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+ }
+
+ if (IS_GRAPHICS_VER(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (GRAPHICS_VER(i915) == 9) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_clr_set(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /* Disable atomics in L3 to prevent unrecoverable hangs */
+ wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
+ GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN8_L3SQCREG4,
+ GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
+ wa_write_clr_set(wal, GEN9_SCRATCH1,
+ EVICTION_PERF_FIX_ENABLE, 0);
+ }
+
+ if (IS_HASWELL(i915)) {
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal,
+ HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ if (IS_VALLEYVIEW(i915)) {
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (IS_IVYBRIDGE(i915)) {
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_clr_set(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+
+ if (GRAPHICS_VER(i915) == 7) {
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+ wa_masked_en(wal,
+ RING_MODE_GEN7(RENDER_RING_BASE),
+ GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_masked_field_set(wal,
+ GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+ }
+
+ if (IS_GRAPHICS_VER(i915, 6, 7))
+ /*
+ * We need to disable the AsyncFlip performance optimisations in
+ * order to use MI_WAIT_FOR_EVENT within the CS. It should
+ * already be programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ */
+ wa_masked_en(wal,
+ RING_MI_MODE(RENDER_RING_BASE),
+ ASYNC_FLIP_PERF_DISABLE);
+
+ if (GRAPHICS_VER(i915) == 6) {
+ /*
+ * Required for the hardware to program scanline values for
+ * waiting
+ * WaEnableFlushTlbInvalidationMode:snb
+ */
+ wa_masked_en(wal,
+ GFX_MODE,
+ GFX_TLB_INVALIDATE_EXPLICIT);
+
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_masked_field_set(wal,
+ GEN6_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ wa_masked_dis(wal,
+ CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB);
+ }
+
+ if (IS_GRAPHICS_VER(i915, 4, 6))
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
+ 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ /* XXX bit doesn't stick on Broadwater */
+ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
+
+ if (GRAPHICS_VER(i915) == 4)
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+		 * image. As soon as it is loaded it is executed, and the stored
+		 * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ wa_add(wal, ECOSKPD(RENDER_RING_BASE),
+ 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0 /* XXX bit doesn't stick on Broadwater */,
+ true);
+}
+
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+static void
+ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
+ /* Wa_14014999345:pvc */
+ wa_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
+ }
+}
+
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them with the same
+ * workaround infrastructure to ensure that they're automatically added to
+ * the GuC save/restore lists, re-applied at the right times, and checked for
+ * any conflicting programming requested by real workarounds.
+ *
+ * Programming settings should be added here only if their registers are not
+ * part of an engine's register state context. If a register is part of a
+ * context, then any tuning settings should be programmed in an appropriate
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+static void
+add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(i915)) {
+ wa_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ }
+
+ if (IS_DG2(i915)) {
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+
+ /*
+ * This is also listed as Wa_22012654132 for certain DG2
+ * steppings, but the tuning setting programming is a superset
+ * since it applies to all DG2 variants and steppings.
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /*
+ * This tuning setting proves beneficial only on ATS-M designs; the
+ * default "age based" setting is optimal on regular DG2 and other
+ * platforms.
+ */
+ if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
+ wa_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
+ THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+}
+
+/*
+ * The workarounds in this function apply to shared registers in
+ * the general render reset domain that aren't tied to a
+ * specific engine. Since all render+compute engines get reset
+ * together, and the contents of these registers are lost during
+ * the shared render domain reset, we'll define such workarounds
+ * here and then add them to just a single RCS or CCS engine's
+ * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
+ */
+static void
+general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ add_render_compute_tuning_settings(i915, wal);
+
+ if (IS_PONTEVECCHIO(i915)) {
+ /* Wa_16016694945 */
+ wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+ }
+
+ if (IS_XEHPSDV(i915)) {
+ /* Wa_1409954639 */
+ wa_masked_en(wal,
+ GEN8_ROW_CHICKEN,
+ SYSTOLIC_DOP_CLOCK_GATING_DIS);
+
+ /* Wa_1607196519 */
+ wa_masked_en(wal,
+ GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_GRF_CLEAR);
+
+ /* Wa_14010670810:xehpsdv */
+ wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
+
+ /* Wa_14010449647:xehpsdv */
+ wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
+ /* Wa_14012362059:xehpsdv */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_14014368820:xehpsdv */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE);
+ }
+
+ if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
+ /* Wa_14015227452:dg2,pvc */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+
+ /* Wa_22014226127:dg2,pvc */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+
+ /* Wa_16015675438:dg2,pvc */
+ wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
+
+ /* Wa_18018781329:dg2,pvc */
+ wa_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+}
+
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
+ return;
+
+ engine_fake_wa_init(engine, wal);
+
+ /*
+	 * These are common workarounds that just need to be applied
+ * to a single RCS/CCS engine's workaround list since
+ * they're reset as part of the general render domain reset.
+ */
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ general_render_compute_wa_init(engine, wal);
+
+ if (engine->class == COMPUTE_CLASS)
+ ccs_engine_wa_init(engine, wal);
+ else if (engine->class == RENDER_CLASS)
+ rcs_engine_wa_init(engine, wal);
+ else
+ xcs_engine_wa_init(engine, wal);
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GRAPHICS_VER(engine->i915) < 4)
+ return;
+
+ wa_init_start(wal, "engine", engine->name);
+ engine_init_workarounds(engine, wal);
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->gt, &engine->wa_list);
+}
+
+static const struct i915_range mcr_ranges_gen8[] = {
+ { .start = 0x5500, .end = 0x55ff },
+ { .start = 0x7000, .end = 0x7fff },
+ { .start = 0x9400, .end = 0x97ff },
+ { .start = 0xb000, .end = 0xb3ff },
+ { .start = 0xe000, .end = 0xe7ff },
+ {},
+};
+
+static const struct i915_range mcr_ranges_gen12[] = {
+ { .start = 0x8150, .end = 0x815f },
+ { .start = 0x9520, .end = 0x955f },
+ { .start = 0xb100, .end = 0xb3ff },
+ { .start = 0xde80, .end = 0xe8ff },
+ { .start = 0x24a00, .end = 0x24a7f },
+ {},
+};
+
+static const struct i915_range mcr_ranges_xehp[] = {
+ { .start = 0x4000, .end = 0x4aff },
+ { .start = 0x5200, .end = 0x52ff },
+ { .start = 0x5400, .end = 0x7fff },
+ { .start = 0x8140, .end = 0x815f },
+ { .start = 0x8c80, .end = 0x8dff },
+ { .start = 0x94d0, .end = 0x955f },
+ { .start = 0x9680, .end = 0x96ff },
+ { .start = 0xb000, .end = 0xb3ff },
+ { .start = 0xc800, .end = 0xcfff },
+ { .start = 0xd800, .end = 0xd8ff },
+ { .start = 0xdc00, .end = 0xffff },
+ { .start = 0x17000, .end = 0x17fff },
+ { .start = 0x24a00, .end = 0x24a7f },
+ {},
+};
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ const struct i915_range *mcr_ranges;
+ int i;
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ mcr_ranges = mcr_ranges_xehp;
+ else if (GRAPHICS_VER(i915) >= 12)
+ mcr_ranges = mcr_ranges_gen12;
+ else if (GRAPHICS_VER(i915) >= 8)
+ mcr_ranges = mcr_ranges_gen8;
+ else
+ return false;
+
+ /*
+ * Registers in these ranges are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ for (i = 0; mcr_ranges[i].start; i++)
+ if (offset >= mcr_ranges[i].start &&
+ offset <= mcr_ranges[i].end)
+ return true;
+
+ return false;
+}
+
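+/*
+ * Emit one MI_STORE_REGISTER_MEM per workaround register, skipping the
+ * multicast (MCR) ranges that cannot be verified via the CS, so the command
+ * streamer copies each register's current value into the scratch buffer.
+ */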
+static int
+wa_list_srm(struct i915_request *rq,
+ const struct i915_wa_list *wal,
+ struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ unsigned int i, count = 0;
+ const struct i915_wa *wa;
+ u32 srm, *cs;
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
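+	/* Gen8+ SRM takes a 64-bit address, adding one dword to the length */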
+ if (GRAPHICS_VER(i915) >= 8)
+ srm++;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
+ count++;
+ }
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 offset = i915_mmio_reg_offset(wa->reg);
+
+ if (mcr_range(i915, offset))
+ continue;
+
+ *cs++ = srm;
+ *cs++ = offset;
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
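+/*
+ * Read every non-MCR workaround register back via the command streamer and
+ * check that each one still carries the bits the workaround list expects;
+ * any mismatch reported by wa_verify() fails the check with -ENXIO.
+ */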
+static int engine_wa_list_verify(struct intel_context *ce,
+ const struct i915_wa_list * const wal,
+ const char *from)
+{
+ const struct i915_wa *wa;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
+ unsigned int i;
+ u32 *results;
+ int err;
+
+ if (!wal->count)
+ return 0;
+
+ vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
+ wal->count * sizeof(u32));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ intel_engine_pm_get(ce->engine);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err == 0)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_pm;
+
+ err = i915_vma_pin_ww(vma, &ww, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = wa_list_srm(rq, wal, vma);
+
+ i915_request_get(rq);
+ if (err)
+ i915_request_set_error_once(rq, err);
+ i915_request_add(rq);
+
+ if (err)
+ goto err_rq;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto err_rq;
+ }
+
+ err = 0;
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
+ continue;
+
+ if (!wa_verify(wa, results[i], wal->name, from))
+ err = -ENXIO;
+ }
+
+ i915_gem_object_unpin_map(vma->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_vma:
+ i915_vma_unpin(vma);
+err_unpin:
+ intel_context_unpin(ce);
+err_pm:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ intel_engine_pm_put(ce->engine);
+ i915_vma_put(vma);
+ return err;
+}
+
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return engine_wa_list_verify(engine->kernel_context,
+ &engine->wa_list,
+ from);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_workarounds.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
new file mode 100644
index 000000000..9beaab77c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_WORKAROUNDS_H_
+#define _INTEL_WORKAROUNDS_H_
+
+#include <linux/slab.h>
+
+#include "intel_workarounds_types.h"
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+struct intel_gt;
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct intel_gt *gt);
+void intel_gt_apply_workarounds(struct intel_gt *gt);
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
new file mode 100644
index 000000000..8a4b6de4e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef __INTEL_WORKAROUNDS_TYPES_H__
+#define __INTEL_WORKAROUNDS_TYPES_H__
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+struct i915_wa {
+ i915_reg_t reg;
+ u32 clr;
+ u32 set;
+ u32 read;
+ bool masked_reg;
+};
+
+struct i915_wa_list {
+ const char *name;
+ const char *engine_name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000..610ca7687
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
new file mode 100644
index 000000000..c0637bf79
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "gem/i915_gem_context.h"
+#include "gt/intel_ring.h"
+
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+
+#include "mock_engine.h"
+#include "selftests/mock_request.h"
+
+static int mock_timeline_pin(struct intel_timeline *tl)
+{
+ int err;
+
+ if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
+ return -EBUSY;
+
+ err = intel_timeline_pin_map(tl);
+ i915_gem_object_unlock(tl->hwsp_ggtt->obj);
+ if (err)
+ return err;
+
+ atomic_inc(&tl->pin_count);
+ return 0;
+}
+
+static void mock_timeline_unpin(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_dec(&tl->pin_count);
+}
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+ const unsigned long sz = PAGE_SIZE;
+ struct intel_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->effective_size = sz;
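+	/* Ring contents are emitted into the tail of this same allocation */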
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
+
+ ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
+ if (IS_ERR(ring->vma)) {
+ kfree(ring);
+ return NULL;
+ }
+
+ intel_ring_update_space(ring);
+
+ return ring;
+}
+
+static void mock_ring_free(struct intel_ring *ring)
+{
+ i915_vma_put(ring->vma);
+
+ kfree(ring);
+}
+
+static struct i915_request *first_request(struct mock_engine *engine)
+{
+ return list_first_entry_or_null(&engine->hw_queue,
+ struct i915_request,
+ mock.link);
+}
+
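+/*
+ * Pretend the hardware has completed the request: remove it from the mock
+ * queue, mark it complete and signal any waiting breadcrumbs.
+ */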
+static void advance(struct i915_request *request)
+{
+ list_del_init(&request->mock.link);
+ i915_request_mark_complete(request);
+ GEM_BUG_ON(!i915_request_completed(request));
+
+ intel_engine_signal_breadcrumbs(request->engine);
+}
+
+static void hw_delay_complete(struct timer_list *t)
+{
+ struct mock_engine *engine = from_timer(engine, t, hw_delay);
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->hw_lock, flags);
+
+ /* Timer fired, first request is complete */
+ request = first_request(engine);
+ if (request)
+ advance(request);
+
+ /*
+ * Also immediately signal any subsequent 0-delay requests, but
+ * requeue the timer for the next delayed request.
+ */
+ while ((request = first_request(engine))) {
+ if (request->mock.delay) {
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
+ break;
+ }
+
+ advance(request);
+ }
+
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+static void mock_context_unpin(struct intel_context *ce)
+{
+}
+
+static void mock_context_post_unpin(struct intel_context *ce)
+{
+ i915_vma_unpin(ce->ring->vma);
+}
+
+static void mock_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ mock_ring_free(ce->ring);
+ mock_timeline_unpin(ce->timeline);
+ }
+
+ intel_context_fini(ce);
+ intel_context_free(ce);
+}
+
+static int mock_context_alloc(struct intel_context *ce)
+{
+ int err;
+
+ ce->ring = mock_ring(ce->engine);
+ if (!ce->ring)
+ return -ENOMEM;
+
+ ce->timeline = intel_timeline_create(ce->engine->gt);
+ if (IS_ERR(ce->timeline)) {
+ kfree(ce->engine);
+ return PTR_ERR(ce->timeline);
+ }
+
+ err = mock_timeline_pin(ce->timeline);
+ if (err) {
+ intel_timeline_put(ce->timeline);
+ ce->timeline = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static int mock_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww, void **unused)
+{
+ return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+}
+
+static int mock_context_pin(struct intel_context *ce, void *unused)
+{
+ return 0;
+}
+
+static void mock_context_reset(struct intel_context *ce)
+{
+}
+
+static const struct intel_context_ops mock_context_ops = {
+ .alloc = mock_context_alloc,
+
+ .pre_pin = mock_context_pre_pin,
+ .pin = mock_context_pin,
+ .unpin = mock_context_unpin,
+ .post_unpin = mock_context_post_unpin,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .reset = mock_context_reset,
+ .destroy = mock_context_destroy,
+};
+
+static int mock_request_alloc(struct i915_request *request)
+{
+ INIT_LIST_HEAD(&request->mock.link);
+ request->mock.delay = 0;
+
+ return 0;
+}
+
+static int mock_emit_flush(struct i915_request *request,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ return cs;
+}
+
+static void mock_submit_request(struct i915_request *request)
+{
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+ unsigned long flags;
+
+ i915_request_submit(request);
+
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ list_add_tail(&request->mock.link, &engine->hw_queue);
+ if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+ if (request->mock.delay)
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
+ else
+ advance(request);
+ }
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+static void mock_add_to_engine(struct i915_request *rq)
+{
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+ list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void mock_remove_from_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->sched_engine->lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->sched_engine->lock);
+ spin_lock(&engine->sched_engine->lock);
+ locked = engine;
+ }
+ list_del_init(&rq->sched.link);
+ spin_unlock_irq(&locked->sched_engine->lock);
+}
+
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+{
+ GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+ struct i915_request *rq;
+ unsigned long flags;
+
+ del_timer_sync(&mock->hw_delay);
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+ intel_engine_signal_breadcrumbs(engine);
+
+ /* Cancel and submit all pending requests. */
+ list_for_each_entry(rq, &mock->hw_queue, mock.link) {
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
+ }
+ INIT_LIST_HEAD(&mock->hw_queue);
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_engine_release(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ i915_sched_engine_put(engine->sched_engine);
+ intel_breadcrumbs_put(engine->breadcrumbs);
+
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+
+ intel_engine_fini_retire(engine);
+}
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name,
+ int id)
+{
+ struct mock_engine *engine;
+
+ GEM_BUG_ON(id >= I915_NUM_ENGINES);
+ GEM_BUG_ON(!to_gt(i915)->uncore);
+
+ engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
+ if (!engine)
+ return NULL;
+
+ /* minimal engine setup for requests */
+ engine->base.i915 = i915;
+ engine->base.gt = to_gt(i915);
+ engine->base.uncore = to_gt(i915)->uncore;
+ snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
+ engine->base.id = id;
+ engine->base.mask = BIT(id);
+ engine->base.legacy_idx = INVALID_ENGINE;
+ engine->base.instance = id;
+ engine->base.status_page.addr = (void *)(engine + 1);
+
+ engine->base.cops = &mock_context_ops;
+ engine->base.request_alloc = mock_request_alloc;
+ engine->base.emit_flush = mock_emit_flush;
+ engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
+ engine->base.submit_request = mock_submit_request;
+ engine->base.add_active_request = mock_add_to_engine;
+ engine->base.remove_active_request = mock_remove_from_engine;
+
+ engine->base.reset.prepare = mock_reset_prepare;
+ engine->base.reset.rewind = mock_reset_rewind;
+ engine->base.reset.cancel = mock_reset_cancel;
+ engine->base.reset.finish = mock_reset_finish;
+
+ engine->base.release = mock_engine_release;
+
+ to_gt(i915)->engine[id] = &engine->base;
+ to_gt(i915)->engine_class[0][id] = &engine->base;
+
+ /* fake hw queue */
+ spin_lock_init(&engine->hw_lock);
+ timer_setup(&engine->hw_delay, hw_delay_complete, 0);
+ INIT_LIST_HEAD(&engine->hw_queue);
+
+ intel_engine_add_user(&engine->base);
+
+ return &engine->base;
+}
+
+int mock_engine_init(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
+
+ engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
+ if (!engine->sched_engine)
+ return -ENOMEM;
+ engine->sched_engine->private_data = engine;
+
+ intel_engine_init_execlists(engine);
+ intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
+
+ engine->breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!engine->breadcrumbs)
+ goto err_schedule;
+
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
+ goto err_breadcrumbs;
+
+ /* We insist the kernel context is using the status_page */
+ engine->status_page.vma = ce->timeline->hwsp_ggtt;
+
+ engine->kernel_context = ce;
+ return 0;
+
+err_breadcrumbs:
+ intel_breadcrumbs_put(engine->breadcrumbs);
+err_schedule:
+ i915_sched_engine_put(engine->sched_engine);
+ return -ENOMEM;
+}
+
+void mock_engine_flush(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+ struct i915_request *request, *rn;
+
+ del_timer_sync(&mock->hw_delay);
+
+ spin_lock_irq(&mock->hw_lock);
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
+ advance(request);
+ spin_unlock_irq(&mock->hw_lock);
+}
+
+void mock_engine_reset(struct intel_engine_cs *engine)
+{
+}
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h
new file mode 100644
index 000000000..cc5ab6e1f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/mock_engine.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __MOCK_ENGINE_H__
+#define __MOCK_ENGINE_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "gt/intel_engine.h"
+
+struct mock_engine {
+ struct intel_engine_cs base;
+
+ spinlock_t hw_lock;
+ struct list_head hw_queue;
+ struct timer_list hw_delay;
+};
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name,
+ int id);
+int mock_engine_init(struct intel_engine_cs *engine);
+
+void mock_engine_flush(struct intel_engine_cs *engine);
+void mock_engine_reset(struct intel_engine_cs *engine);
+void mock_engine_free(struct intel_engine_cs *engine);
+
+#endif /* !__MOCK_ENGINE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000..76fbae358
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+ struct intel_timeline *tl = i915_request_timeline(rq);
+ long timeout;
+ int err = 0;
+
+ intel_timeline_get(tl);
+ i915_request_get(rq);
+
+ /* Opencode i915_request_add() so we can keep the timeline locked. */
+ __i915_request_commit(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ __i915_request_queue_bh(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+ mutex_unlock(&tl->mutex);
+
+ i915_request_put(rq);
+ intel_timeline_put(tl);
+
+ return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err = 0;
+
+ mutex_lock(&tl->mutex);
+ do {
+ struct i915_request *rq;
+ long timeout;
+
+ if (list_empty(&tl->requests))
+ break;
+
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
+ i915_request_get(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ i915_request_put(rq);
+ } while (!err);
+ mutex_unlock(&tl->mutex);
+
+ /* Wait for all barriers to complete (remote CPU) before we check */
+ i915_active_unlock_wait(&ce->active);
+ return err;
+}
+
+static int __live_context_size(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ void *vaddr;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, false));
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ intel_context_unpin(ce);
+ goto err;
+ }
+
+ /*
+ * Note that execlists also applies a redzone which it checks on
+ * context unpin when debugging. We are using the same location
+ * and same poison value so that our checks overlap. Despite the
+ * redundancy, we want to keep this little selftest so that we
+ * get coverage of any and all submission backends, and we can
+ * always extend this test to ensure we trick the HW into a
+	 * compromising position wrt the various sections that need
+ * to be written into the context state.
+ *
+ * TLDR; this overlaps with the execlists redzone.
+ */
+ vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ intel_context_unpin(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ /* Force the context switch */
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
+ pr_err("%s context overwrote trailing red-zone!", engine->name);
+ err = -EINVAL;
+ }
+
+err_unpin:
+ i915_gem_object_unpin_map(ce->state->obj);
+err:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_context_size(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that our context sizes are correct by seeing if the
+ * HW tries to write past the end of one.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct file *saved;
+
+ if (!engine->context_size)
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ /*
+ * Hide the old default state -- we lie about the context size
+ * and get confused when the default state is smaller than
+		 * expected. For our do-nothing request, inheriting the
+		 * active state is sufficient; we are only checking that we
+ * don't use more than we planned.
+ */
+ saved = fetch_and_zero(&engine->default_state);
+
+ /* Overlaps with the execlists redzone */
+ engine->context_size += I915_GTT_PAGE_SIZE;
+
+ err = __live_context_size(engine);
+
+ engine->context_size -= I915_GTT_PAGE_SIZE;
+
+ engine->default_state = saved;
+
+ intel_engine_pm_put(engine);
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine)
+{
+ unsigned long saved_heartbeat;
+ struct intel_context *ce;
+ int pass;
+ int err;
+
+ /*
+ * We keep active contexts alive until after a subsequent context
+ * switch as the final write from the context-save will be after
+ * we retire the final request. We track when we unpin the context,
+ * under the presumption that the final pin is from the last request,
+ * and instead of immediately unpinning the context, we add a task
+ * to unpin the context from the next idle-barrier.
+ *
+ * This test makes sure that the context is kept alive until a
+ * subsequent idle-barrier (emitted when the engine wakeref hits 0
+ * with no more outstanding requests).
+ *
+ * In GuC submission mode we don't use idle barriers and we instead
+ * get a message from the GuC to signal that it is safe to unpin the
+ * context from memory.
+ */
+ if (intel_engine_uses_guc(engine))
+ return 0;
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ for (pass = 0; pass <= 2; pass++) {
+ struct i915_request *rq;
+
+ intel_engine_pm_get(engine);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_engine;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto out_engine;
+
+ /* Context will be kept active until after an idle-barrier. */
+ if (i915_active_is_idle(&ce->active)) {
+ pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ goto out_engine;
+ }
+
+ if (!intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is asleep before idle-barrier\n",
+ engine->name);
+ err = -EINVAL;
+ goto out_engine;
+ }
+
+out_engine:
+ intel_engine_pm_put(engine);
+ if (err)
+ goto err;
+ }
+
+ /* Now make sure our idle-barriers are flushed */
+ err = intel_engine_flush_barriers(engine);
+ if (err)
+ goto err;
+
+ /* Wait for the barrier and in the process wait for engine to park */
+ err = context_sync(engine->kernel_context);
+ if (err)
+ goto err;
+
+ if (!i915_active_is_idle(&ce->active)) {
+ pr_err("context is still active!");
+ err = -EINVAL;
+ }
+
+ intel_engine_pm_flush(engine);
+
+ if (intel_engine_pm_is_awake(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s is still awake:%d after idle-barriers\n",
+ engine->name,
+ atomic_read(&engine->wakeref.count));
+ GEM_TRACE_DUMP();
+
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_active_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_active_context(engine);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(remote);
+ if (err)
+ return err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ err = intel_context_prepare_remote_request(remote, rq);
+ if (err) {
+ i915_request_add(rq);
+ goto unpin;
+ }
+
+ err = request_sync(rq);
+
+unpin:
+ intel_context_unpin(remote);
+ return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *local, *remote;
+ unsigned long saved_heartbeat;
+ int pass;
+ int err;
+
+ /*
+ * Check that our idle barriers do not interfere with normal
+ * activity tracking. In particular, check that operating
+ * on the context image remotely (intel_context_prepare_remote_request),
+ * which inserts foreign fences into intel_context.active, does not
+ * clobber the idle-barrier.
+ *
+ * In GuC submission mode we don't use idle barriers.
+ */
+ if (intel_engine_uses_guc(engine))
+ return 0;
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ remote = intel_context_create(engine);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
+ local = intel_context_create(engine);
+ if (IS_ERR(local)) {
+ err = PTR_ERR(local);
+ goto err_remote;
+ }
+
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+ intel_engine_pm_get(engine);
+
+ for (pass = 0; pass <= 2; pass++) {
+ err = __remote_sync(local, remote);
+ if (err)
+ break;
+
+ err = __remote_sync(engine->kernel_context, remote);
+ if (err)
+ break;
+
+ if (i915_active_is_idle(&remote->active)) {
+ pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+
+ intel_context_put(local);
+err_remote:
+ intel_context_put(remote);
+ return err;
+}
+
+static int live_remote_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_remote_context(engine);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_context_size),
+ SUBTEST(live_active_context),
+ SUBTEST(live_remote_context),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
new file mode 100644
index 000000000..57fea9ea1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+
+int intel_engine_live_selftests(struct drm_i915_private *i915)
+{
+ static int (* const tests[])(struct intel_gt *) = {
+ live_engine_pm_selftests,
+ NULL,
+ };
+ struct intel_gt *gt = to_gt(i915);
+ typeof(*tests) *fn;
+
+ for (fn = tests; *fn; fn++) {
+ int err;
+
+ err = (*fn)(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
new file mode 100644
index 000000000..c6feb3bd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_H
+#define SELFTEST_ENGINE_H
+
+struct intel_gt;
+
+int live_engine_pm_selftests(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
new file mode 100644
index 000000000..1b75f478d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "intel_gpu_commands.h"
+#include "intel_gt_pm.h"
+#include "intel_rps.h"
+
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
+
+#define COUNT 5
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
+static void perf_begin(struct intel_gt *gt)
+{
+ intel_gt_pm_get(gt);
+
+ /* Boost gpufreq to max [waitboost] and keep it fixed */
+ atomic_inc(&gt->rps.num_waiters);
+ schedule_work(&gt->rps.work);
+ flush_work(&gt->rps.work);
+}
+
+static int perf_end(struct intel_gt *gt)
+{
+ atomic_dec(&gt->rps.num_waiters);
+ intel_gt_pm_put(gt);
+
+ return igt_flush_test(gt->i915);
+}
+
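+/*
+ * Emit an SRM that copies the engine's RING_TIMESTAMP into the requested
+ * HWSP slot; the tests read the slots back via rq->hwsp_seqno[] once the
+ * request completes to measure elapsed CS cycles.
+ */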
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ struct intel_timeline *tl =
+ rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
+ u32 cmd;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
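+	/* Gen8+ uses a 64-bit address, so the command is one dword longer */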
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = tl->hwsp_offset + slot * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
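+/* The smallest valid batch: a single MI_BATCH_BUFFER_END */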
+static struct i915_vma *create_empty_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ cs[0] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
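+/*
+ * Reduce the COUNT samples to one value: sort them, drop the lowest and
+ * highest, and return a weighted average of the remaining three with the
+ * median counted twice.
+ */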
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+ sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
+
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ return sum >> 2;
+}
+
+static int perf_mi_bb_start(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *batch;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ batch = create_empty_batch(ce);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(batch);
+ if (err) {
+ intel_engine_pm_put(engine);
+ i915_vma_put(batch);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
+ }
+ i915_vma_put(batch);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: MI_BB_START cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
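+/* A 64KiB buffer of MI_NOOPs (zeros) terminated with MI_BATCH_BUFFER_END */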
+static struct i915_vma *create_nop_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ memset(cs, 0, SZ_64K);
+ cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int perf_mi_noop(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *base, *nop;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ base = create_empty_batch(ce);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(base);
+ if (err) {
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ nop = create_nop_batch(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(nop);
+ if (err) {
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ base->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ nop->node.start,
+ nop->node.size,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 4);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ cycles[i] =
+ (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
+ (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
+ }
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: 16K MI_NOOP cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_mi_bb_start),
+ SUBTEST(perf_mi_noop),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
+
+static int intel_mmio_bases_check(void *arg)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+ const struct engine_info *info = &intel_engines[i];
+ u8 prev = U8_MAX;
+
+ for (j = 0; j < MAX_MMIO_BASES; j++) {
+ u8 ver = info->mmio_bases[j].graphics_ver;
+ u32 base = info->mmio_bases[j].base;
+
+ if (ver >= prev) {
+ pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ prev, ver);
+ return -EINVAL;
+ }
+
+ if (ver == 0)
+ break;
+
+ if (!base) {
+ pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ base, ver, j);
+ return -EINVAL;
+ }
+
+ prev = ver;
+ }
+
+ pr_debug("%s: min graphics version supported for %s%d is %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->instance,
+ prev);
+ }
+
+ return 0;
+}
+
+int intel_engine_cs_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_mmio_bases_check),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
new file mode 100644
index 000000000..273d440a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_drv.h"
+
+#include "intel_gt_requests.h"
+#include "i915_selftest.h"
+#include "selftest_engine_heartbeat.h"
+
+static void reset_heartbeat(struct intel_engine_cs *engine)
+{
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+}
+
+static int timeline_sync(struct intel_timeline *tl)
+{
+ struct dma_fence *fence;
+ long timeout;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
+ return 0;
+
+ timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static int engine_sync_barrier(struct intel_engine_cs *engine)
+{
+ return timeline_sync(engine->kernel_context->timeline);
+}
+
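+/*
+ * A small refcounted i915_active used by the idle-pulse tests: the engine's
+ * idle barriers are attached to it so the test can observe when a pulse or
+ * flush retires them.
+ */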
+struct pulse {
+ struct i915_active active;
+ struct kref kref;
+};
+
+static int pulse_active(struct i915_active *active)
+{
+ kref_get(&container_of(active, struct pulse, active)->kref);
+ return 0;
+}
+
+static void pulse_free(struct kref *kref)
+{
+ struct pulse *p = container_of(kref, typeof(*p), kref);
+
+ i915_active_fini(&p->active);
+ kfree(p);
+}
+
+static void pulse_put(struct pulse *p)
+{
+ kref_put(&p->kref, pulse_free);
+}
+
+static void pulse_retire(struct i915_active *active)
+{
+ pulse_put(container_of(active, struct pulse, active));
+}
+
+static struct pulse *pulse_create(void)
+{
+ struct pulse *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ kref_init(&p->kref);
+ i915_active_init(&p->active, pulse_active, pulse_retire, 0);
+
+ return p;
+}
+
+static void pulse_unlock_wait(struct pulse *p)
+{
+ i915_active_unlock_wait(&p->active);
+}
+
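+/*
+ * A pulse wraps an i915_active whose only job is to carry the engine's idle
+ * barriers: pulse_active() takes a reference every time the active is
+ * acquired and pulse_retire() drops it once the barriers have been flushed.
+ * __live_idle_pulse() attaches a barrier to the engine, runs the supplied
+ * flush function (intel_engine_flush_barriers or intel_engine_pulse) and
+ * then checks that the barrier list has been drained and the active idled.
+ */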
+static int __live_idle_pulse(struct intel_engine_cs *engine,
+ int (*fn)(struct intel_engine_cs *cs))
+{
+ struct pulse *p;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ p = pulse_create();
+ if (!p)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&p->active);
+ if (err)
+ goto out;
+
+ err = i915_active_acquire_preallocate_barrier(&p->active, engine);
+ if (err) {
+ i915_active_release(&p->active);
+ goto out;
+ }
+
+ i915_active_acquire_barrier(&p->active);
+ i915_active_release(&p->active);
+
+ GEM_BUG_ON(i915_active_is_idle(&p->active));
+ GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
+
+ err = fn(engine);
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+
+ if (engine_sync_barrier(engine)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: no heartbeat pulse?\n", engine->name);
+ intel_engine_dump(engine, &m, "%s", engine->name);
+
+ err = -ETIME;
+ goto out;
+ }
+
+ GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
+
+ pulse_unlock_wait(p); /* synchronize with the retirement callback */
+
+ if (!i915_active_is_idle(&p->active)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
+ i915_active_print(&p->active, &m);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ pulse_put(p);
+ return err;
+}
+
+static int live_idle_flush(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ st_engine_heartbeat_disable(engine);
+ err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int live_idle_pulse(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that heartbeat pulses flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ st_engine_heartbeat_disable(engine);
+ err = __live_idle_pulse(engine, intel_engine_pulse);
+ st_engine_heartbeat_enable(engine);
+ if (err && err != -ENODEV)
+ break;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ const u32 *a = _a, *b = _b;
+
+ return *a - *b;
+}
+
+static int __live_heartbeat_fast(struct intel_engine_cs *engine)
+{
+ const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
+ struct intel_context *ce;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u32 times[5];
+ int err;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ intel_engine_pm_get(engine);
+
+ err = intel_engine_set_heartbeat(engine, 1);
+ if (err)
+ goto err_pm;
+
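+	/*
+	 * With the interval forced to 1ms, each iteration manufactures a
+	 * heartbeat tick by parking and unparking the worker, grabs the
+	 * in-flight systole request and then measures how long it takes the
+	 * worker to replace it, i.e. the observed heartbeat delay.
+	 */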
+ for (i = 0; i < ARRAY_SIZE(times); i++) {
+ do {
+ /* Manufacture a tick */
+ intel_engine_park_heartbeat(engine);
+ GEM_BUG_ON(engine->heartbeat.systole);
+ engine->serial++; /* pretend we are not idle! */
+ intel_engine_unpark_heartbeat(engine);
+
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat %d did not start\n",
+ engine->name, i);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ } while (!rq);
+
+ t0 = ktime_get();
+ while (rq == READ_ONCE(engine->heartbeat.systole))
+ yield(); /* work is on the local cpu! */
+ t1 = ktime_get();
+
+ i915_request_put(rq);
+ times[i] = ktime_us_delta(t1, t0);
+ }
+
+ sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
+
+ pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ times[0],
+ times[ARRAY_SIZE(times) - 1]);
+
+ /*
+ * Ideally, the upper bound on min work delay would be something like
+ * 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
+ * are, even with system_wq_highpri, at the mercy of the CPU scheduler
+	 * and may be stuck behind some slow work for many milliseconds, such
+	 * as our very own display workers.
+ */
+ if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
+ pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ error_threshold);
+ err = -EINVAL;
+ }
+
+ reset_heartbeat(engine);
+err_pm:
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_heartbeat_fast(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the heartbeat ticks at the desired rate. */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_heartbeat_fast(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_heartbeat_off(struct intel_engine_cs *engine)
+{
+ int err;
+
+ intel_engine_pm_get(engine);
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat not running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ err = intel_engine_set_heartbeat(engine, 0);
+ if (err)
+ goto err_pm;
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat still running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+ if (READ_ONCE(engine->heartbeat.systole)) {
+ pr_err("%s: heartbeat still allocated\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+err_beat:
+ reset_heartbeat(engine);
+err_pm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+static int live_heartbeat_off(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can turn off heartbeat and not interrupt VIP */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = __live_heartbeat_off(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_idle_flush),
+ SUBTEST(live_idle_pulse),
+ SUBTEST(live_heartbeat_fast),
+ SUBTEST(live_heartbeat_off),
+ };
+ int saved_hangcheck;
+ int err;
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ saved_hangcheck = i915->params.enable_hangcheck;
+ i915->params.enable_hangcheck = INT_MAX;
+
+ err = intel_gt_live_subtests(tests, to_gt(i915));
+
+ i915->params.enable_hangcheck = saved_hangcheck;
+ return err;
+}
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
+{
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
+}
+
+void st_engine_heartbeat_disable_no_pm(struct intel_engine_cs *engine)
+{
+ engine->props.heartbeat_interval_ms = 0;
+
+ /*
+	 * Park the heartbeat, but without holding the PM lock as that
+	 * makes the engines appear not-idle. Note that if/when unpark
+	 * is later called because the PM lock is acquired, the heartbeat
+	 * still won't be enabled, since the interval was set to 0 above.
+ */
+ if (intel_engine_pm_get_if_awake(engine)) {
+ intel_engine_park_heartbeat(engine);
+ intel_engine_pm_put(engine);
+ }
+}
+
+void st_engine_heartbeat_enable_no_pm(struct intel_engine_cs *engine)
+{
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
new file mode 100644
index 000000000..81da2cd8e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_HEARTBEAT_H
+#define SELFTEST_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void st_engine_heartbeat_disable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_disable_no_pm(struct intel_engine_cs *engine);
+void st_engine_heartbeat_enable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_enable_no_pm(struct intel_engine_cs *engine);
+
+#endif /* SELFTEST_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
new file mode 100644
index 000000000..0dcb3ed44
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_selftest.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
+#include "selftest_engine.h"
+#include "selftest_engine_heartbeat.h"
+#include "selftests/igt_atomic.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+
+#define COUNT 5
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ return *a - *b;
+}
+
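+/*
+ * Reduce COUNT (5) samples to a single value: sort them, discard the
+ * smallest and largest, and return a weighted average of the middle three,
+ * (a[1] + 2 * a[2] + a[3]) / 4.
+ */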
+static u64 trifilter(u64 *a)
+{
+ sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
+ return (a[1] + 2 * a[2] + a[3]) >> 2;
+}
+
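+/*
+ * The helpers below hand-roll the command packets used by the timestamp
+ * measurements: a polling MI_SEMAPHORE_WAIT on a GGTT dword, an
+ * MI_STORE_DWORD_IMM to a GGTT dword, and an MI_STORE_REGISTER_MEM to
+ * snapshot an MMIO register. Each packet is four dwords, which is why
+ * __measure_timestamps() reserves 28 dwords for its seven packets.
+ */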
+static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ op;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
+static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void write_semaphore(u32 *x, u32 value)
+{
+ WRITE_ONCE(*x, value);
+ wmb();
+}
+
+static int __measure_timestamps(struct intel_context *ce,
+ u64 *dt, u64 *d_ring, u64 *d_ctx)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
+ u32 offset = i915_ggtt_offset(engine->status_page.vma);
+ struct i915_request *rq;
+ u32 *cs;
+
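+	/*
+	 * sema[] points at dword 1000 of the status page, i.e. byte offset
+	 * 4000, so the offset + 4000..4016 addresses written below land in
+	 * sema[0]..sema[4]: sema[0]/sema[4] hold RING_TIMESTAMP before/after
+	 * the busy-wait, sema[1]/sema[3] hold RING_CTX_TIMESTAMP, and
+	 * sema[2] is the CPU <-> GPU handshake bracketing the measurement.
+	 */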
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 28);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* Signal & wait for start */
+ cs = emit_store(cs, offset + 4008, 1);
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
+
+ /* Busy wait */
+ cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
+
+ cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
+ cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
+
+ intel_ring_advance(rq, cs);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ intel_engine_flush_submission(engine);
+
+ /* Wait for the request to start executing, that then waits for us */
+ while (READ_ONCE(sema[2]) == 0)
+ cpu_relax();
+
+	/* Run the request for 100us, sampling timestamps before/after */
+ local_irq_disable();
+ write_semaphore(&sema[2], 0);
+ while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
+ cpu_relax();
+ *dt = local_clock();
+ udelay(100);
+ *dt = local_clock() - *dt;
+ write_semaphore(&sema[2], 1);
+ local_irq_enable();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ return -ETIME;
+ }
+ i915_request_put(rq);
+
+ pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
+ engine->name, sema[1], sema[3], sema[0], sema[4]);
+
+ *d_ctx = sema[3] - sema[1];
+ *d_ring = sema[4] - sema[0];
+ return 0;
+}
+
+static int __live_engine_timestamps(struct intel_engine_cs *engine)
+{
+ u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
+ struct intel_context *ce;
+ int i, err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < COUNT; i++) {
+ err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ dt = trifilter(st);
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
+ engine->name, dt,
+ intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
+ intel_gt_clock_interval_to_ns(engine->gt, d_ring));
+
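+	/*
+	 * The wallclock delta and the RING_TIMESTAMP delta (converted to ns)
+	 * must agree to within a 3:4 ratio in either direction; anything
+	 * further apart suggests the reported CS clock frequency is wrong.
+	 */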
+ d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
+ if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
+ pr_err("%s Mismatch between ring timestamp and walltime!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ d_ring = trifilter(s_ring);
+ d_ctx = trifilter(s_ctx);
+
+ d_ctx *= engine->gt->clock_frequency;
+ if (GRAPHICS_VER(engine->i915) == 11)
+ d_ring *= 12500000; /* Fixed 80ns for GEN11 ctx timestamp? */
+ else
+ d_ring *= engine->gt->clock_frequency;
+
+ if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
+ pr_err("%s Mismatch between ring and context timestamps!\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int live_engine_timestamps(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
+ * the same CS clock.
+ */
+
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ int err;
+
+ st_engine_heartbeat_disable(engine);
+ err = __live_engine_timestamps(engine);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
+{
+ ktime_t start, unused, dt;
+
+ if (!intel_engine_uses_guc(engine))
+ return 0;
+
+ /*
+ * In GuC mode of submission, the busyness stats may get updated after
+	 * the batch starts running. Poll for a change in busyness and time out
+	 * after 10 ms (matching the 10000000 ns check below).
+ */
+ start = ktime_get();
+ while (intel_engine_get_busy_time(engine, &unused) == busyness) {
+ dt = ktime_get() - start;
+ if (dt > 10000000) {
+ pr_err("active wait timed out %lld\n", dt);
+ ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
+ return -ETIME;
+ }
+ }
+
+ return 0;
+}
+
+static int live_engine_busy_stats(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that if an engine supports busy-stats, they tell the truth.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ ktime_t busyness, dummy;
+ ktime_t de, dt;
+ ktime_t t[2];
+
+ if (!intel_engine_supports_stats(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ err = -EBUSY;
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ ENGINE_TRACE(engine, "measuring idle time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ udelay(100);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
+ if (de < 0 || de > 10) {
+ pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+ /* 100% busy */
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_add(rq);
+
+ busyness = intel_engine_get_busy_time(engine, &dummy);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ err = -ETIME;
+ goto end;
+ }
+
+ err = __spin_until_busier(engine, busyness);
+ if (err) {
+ GEM_TRACE_DUMP();
+ goto end;
+ }
+
+ ENGINE_TRACE(engine, "measuring busy time\n");
+ preempt_disable();
+ de = intel_engine_get_busy_time(engine, &t[0]);
+ mdelay(10);
+ de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
+ preempt_enable();
+ dt = ktime_sub(t[1], t[0]);
+ if (100 * de < 95 * dt || 95 * de > 100 * dt) {
+ pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
+ engine->name,
+ de, (int)div64_u64(100 * de, dt), dt);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ goto end;
+ }
+
+end:
+ st_engine_heartbeat_enable(engine);
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_engine_pm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check we can call intel_engine_pm_put from any context. No
+ * failures are reported directly, but if we mess up lockdep should
+ * tell us.
+ */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("Unable to flush GT pm before test\n");
+ return -EBUSY;
+ }
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt, id) {
+ const typeof(*igt_atomic_phases) *p;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ /*
+ * Acquisition is always synchronous, except if we
+ * know that the engine is already awake, in which
+ * case we should use intel_engine_pm_get_if_awake()
+ * to atomically grab the wakeref.
+ *
+ * In practice,
+ * intel_engine_pm_get();
+ * intel_engine_pm_put();
+ * occurs in one thread, while simultaneously
+ * intel_engine_pm_get_if_awake();
+ * intel_engine_pm_put();
+ * occurs from atomic context in another.
+ */
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+ intel_engine_pm_get(engine);
+
+ p->critical_section_begin();
+ if (!intel_engine_pm_get_if_awake(engine))
+ pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
+ engine->name, p->name);
+ else
+ intel_engine_pm_put_async(engine);
+ intel_engine_pm_put_async(engine);
+ p->critical_section_end();
+
+ intel_engine_pm_flush(engine);
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is still awake after flushing pm\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ /* gt wakeref is async (deferred to workqueue) */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("GT failed to idle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int live_engine_pm_selftests(struct intel_gt *gt)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_timestamps),
+ SUBTEST(live_engine_busy_stats),
+ SUBTEST(live_engine_pm),
+ };
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
new file mode 100644
index 000000000..b370411d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -0,0 +1,4521 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_reset.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ GEM_TRACE("spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_ctx;
+ }
+
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto out_ctx;
+ }
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
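+		/*
+		 * Advance ce[1]'s ring to its midpoint so the two contexts
+		 * report very different RING_TAIL values; a lite-restore that
+		 * picks up the wrong tail will then run straight into the
+		 * poisoned ring contents filled in above.
+		 */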
+ GEM_BUG_ON(!ce[1]->ring->size);
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ lrc_update_regs(ce[1], engine, ce[1]->ring->head);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+			 * but it will install a dependency of rq[1] upon rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->sched_engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_PRIORITY_MAX);
+}
+
+static int live_unlite_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Setup a preemption event that will cause almost the entire ring
+ * to be unwound, potentially fooling our intel_ring_direction()
+ * into emitting a forward lite-restore instead of the rollback.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+ st_engine_heartbeat_disable(engine);
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ /* Create max prio spinner, followed by N low prio nops */
+ rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring, until we will cause a wrap */
+ n = 0;
+ while (intel_ring_direction(ce[0]->ring,
+ rq->wa_tail,
+ ce[0]->ring->tail) <= 0) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
+ rq->tail,
+ ce[0]->ring->tail) <= 0);
+ i915_request_put(rq);
+
+ /* Create a second ring to preempt the first ring after rq[0] */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ st_engine_heartbeat_enable(engine);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+	 * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
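+/*
+ * Disable the submission tasklet and claim the I915_RESET_ENGINE bit so the
+ * test can invoke the tasklet callback and __intel_engine_reset_bh() by
+ * hand. If the reset bit is already held by someone else, give up and wedge
+ * the GT rather than fight over it.
+ */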
+static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ tasklet_disable(&engine->sched_engine->tasklet);
+ local_bh_disable();
+
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags)) {
+ local_bh_enable();
+ tasklet_enable(&engine->sched_engine->tasklet);
+
+ intel_gt_set_wedged(engine->gt);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
+{
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &engine->gt->reset.flags);
+
+ local_bh_enable();
+ tasklet_enable(&engine->sched_engine->tasklet);
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendents are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
+ goto out;
+
+ engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ engine_unlock_reset_tasklet(engine);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ int err = 0;
+
+ st_engine_heartbeat_disable(engine);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(*client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_err("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request (%s) with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ i915_request_completed(client[i]) ? "completed" : "running",
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
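+/*
+ * Each link in the semaphore chain below polls dword [idx] of the shared
+ * vma until it turns non-zero and then writes 1 into dword [idx - 1].
+ * release_queue() writes the final dword, so the release ripples backwards
+ * through the chain one request at a time until the head request on the
+ * outer engine is signalled, exercising timeslicing between the queued
+ * requests on each engine.
+ */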
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto out_ce;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ce:
+ intel_context_put(ce);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx, int prio)
+{
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ local_bh_disable();
+ engine->sched_engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
+
+ return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_put(rq);
+ }
+ }
+
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head, 0,
+ 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ st_engine_heartbeat_disable(engine);
+ err = slice_semaphore_queue(engine, vma, 5);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = idx;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = idx + 1;
+
+ intel_ring_advance(rq, cs);
+
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
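+/*
+ * Each rewinder request spins until slot[0] reaches its index, records
+ * RING_TIMESTAMP into slot[idx] and then bumps slot[0] to idx + 1 to
+ * release the next rewinder. Comparing the recorded timestamps afterwards
+ * reveals the order in which the requests actually executed.
+ */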
+static int live_timeslice_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+	 * dependencies, we may end up replacing the context with itself,
+	 * but with only a few of those requests remaining, forcing us to
+	 * rewind the RING_TAIL of the original request.
+ */
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Z, Y };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[A1] = create_rewinder(ce, NULL, slot, X);
+ if (IS_ERR(rq[A1])) {
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[A2] = create_rewinder(ce, NULL, slot, Y);
+ intel_context_put(ce);
+ if (IS_ERR(rq[A2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[A2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
+ intel_context_put(ce);
+ if (IS_ERR(rq[2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[B1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
+ while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+ intel_engine_flush_submission(engine);
+ }
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ st_engine_heartbeat_enable(engine);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static long slice_timeout(struct intel_engine_cs *engine)
+{
+ long timeout;
+
+ /* Enough time for a timeslice to kick in, and kick out */
+ timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
+
+ /* Enough time for the nop request to complete */
+ timeout += HZ / 5;
+
+ return timeout + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+	 * Make sure that even if ELSP[0] and ELSP[1] are both filled, with
+	 * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_heartbeat;
+ }
+ engine->sched_engine->schedule(rq, &attr);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto err_rq;
+ }
+ err = wait_for_submit(engine, nop, HZ / 2);
+ i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching priority as semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err)
+ goto err_rq;
+
+ /* Wait until we ack the release_queue and start timeslicing */
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (READ_ONCE(engine->execlists.pending[0]));
+
+ /* Timeslice every jiffy, so within 2 we should signal */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+err_rq:
+ i915_request_put(rq);
+err_heartbeat:
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_REQUEST_NOPREEMPT.
+ */
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_busywait_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ u32 *map;
+ int err;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ ctx_hi = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx_hi))
+ return PTR_ERR(ctx_hi);
+
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx_lo)) {
+ err = PTR_ERR(ctx_lo);
+ goto err_ctx_hi;
+ }
+
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+		 * busywaits on a semaphore (inside the ringbuffer, where
+		 * it should be preemptible) and the high priority request
+		 * uses a MI_STORE_DWORD_IMM to update the semaphore value,
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = igt_request_alloc(ctx_lo, engine);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ i915_request_put(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_request_put(lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
+static int live_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915, NULL);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(gt->i915, NULL);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_spin_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ GEM_TRACE("lo spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_spin_lo;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ GEM_TRACE("hi spinner failed to start\n");
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_spin_lo;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_spin_lo;
+ }
+ }
+
+ err = 0;
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static int live_late_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_hi, spin_lo;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {};
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915, NULL);
+ if (!ctx_hi)
+ return -ENOMEM;
+
+ ctx_lo = kernel_context(gt->i915, NULL);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = 1;
+
+ for_each_engine(engine, gt, id) {
+ struct igt_live_test t;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_spin_lo;
+ }
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ pr_err("First context failed to start\n");
+ goto err_wedged;
+ }
+
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("Second context overtook first?\n");
+ goto err_wedged;
+ }
+
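+ /* Now raise the second context to maximum priority to force preemption */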
+ attr.priority = I915_PRIORITY_MAX;
+ engine->sched_engine->schedule(rq, &attr);
+
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
+ pr_err("High priority context failed to preempt the low priority context\n");
+ GEM_TRACE_DUMP();
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_spin_lo;
+ }
+ }
+
+ err = 0;
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_spin_hi:
+ igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_spin_lo;
+}
+
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
+{
+ c->ctx = kernel_context(gt->i915, NULL);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, gt))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be being observed and does not want to be interrupted.
+ */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_PRIORITY_MAX;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_ban(rq->context, rq);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ intel_context_ban(rq[1]->context, rq[1]);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ intel_context_ban(rq[2]->context, rq[2]);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
+ goto out;
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The behavior differs depending on whether semaphores are available.
+ * With semaphores, the subsequent request is already on the hardware
+ * and is not cancelled; without them, it is still held in the driver
+ * and is cancelled.
+ */
+ if (intel_engine_has_semaphores(rq[1]->engine) &&
+ rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return 0;
+
+ if (!intel_has_reset_engine(arg->engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_ban(rq->context, rq);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int __cancel_fail(struct live_preempt_cancel *arg)
+{
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq;
+ int err;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return 0;
+
+ if (!intel_has_reset_engine(engine->gt))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, engine->name);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
+ intel_context_set_banned(rq->context);
+
+ err = intel_engine_pulse(engine);
+ if (err)
+ goto out;
+
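+ /*
+ * Arm the selftest reset-timeout injection so the engine reset
+ * provoked by the preempt timeout is forced to fail.
+ */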
+ force_reset_timeout(engine);
+
+ /* force preempt reset [failure] */
+ while (!engine->execlists.pending[0])
+ intel_engine_flush_submission(engine);
+ del_timer_sync(&engine->execlists.preempt);
+ intel_engine_flush_submission(engine);
+
+ cancel_reset_timeout(engine);
+
+ /* after failure, require heartbeats to reset device */
+ intel_engine_set_heartbeat(engine, 1);
+ err = wait_for_reset(engine, rq, HZ / 2);
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_fail(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0; /* presume black box */
+
+ if (intel_vgpu_active(gt->i915))
+ return 0; /* GVT forces single port & request submission */
+
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ st_engine_heartbeat_disable(engine);
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ st_engine_heartbeat_enable(engine);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
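+ /*
+ * Boost the already-running request to maximum priority; as it
+ * remains at the head of the queue, this must not inject a
+ * preempt-to-idle cycle.
+ */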
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->sched_engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ st_engine_heartbeat_enable(engine);
+ goto err_wedged;
+ }
+
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ st_engine_heartbeat_enable(engine);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
+
+ if (preempt_client_init(gt, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
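+ /*
+ * Estimate how many requests fit in the ring from the space
+ * consumed by a single spinner request.
+ */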
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = igt_request_alloc(hi.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ engine->sched_engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
+
+ rq = igt_request_alloc(lo.ctx, engine);
+ if (IS_ERR(rq))
+ goto err_wedged;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ goto err_wedged;
+ }
+ i915_request_put(rq);
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_client_lo;
+}
+
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = i915_vma_get(vma);
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
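+ /*
+ * Reuse the selftest-only mock.link to chain the gang together so the
+ * caller can walk from the newest (highest priority) request back
+ * down to the oldest.
+ */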
+ rq->mock.link.next = &(*prev)->mock.link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int __live_preempt_ring(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ int queue_sz, int ring_sz)
+{
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err = 0;
+ int n;
+
+ if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
+ return -EIO;
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ tmp->ring_size = ring_sz;
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ memset32(tmp->ring->vaddr,
+ 0xdeadbeef, /* trigger a hang if executed */
+ tmp->ring->vma->size / sizeof(u32));
+
+ ce[n] = tmp;
+ }
+
+ rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ i915_request_get(rq);
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ intel_gt_set_wedged(engine->gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ /* Fill the ring until we cause a wrap */
+ n = 0;
+ while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
+ struct i915_request *tmp;
+
+ tmp = intel_context_create_request(ce[0]);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ i915_request_put(rq);
+ goto err_ce;
+ }
+
+ i915_request_add(tmp);
+ intel_engine_flush_submission(engine);
+ n++;
+ }
+ intel_engine_flush_submission(engine);
+ pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
+ engine->name, queue_sz, n,
+ ce[0]->ring->size,
+ ce[0]->ring->tail,
+ ce[0]->ring->emit,
+ rq->tail);
+ i915_request_put(rq);
+
+ /* Create a second request to preempt the first ring */
+ rq = intel_context_create_request(ce[1]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ce;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ i915_request_put(rq);
+ if (err) {
+ pr_err("%s: preemption request was not submitted\n",
+ engine->name);
+ err = -ETIME;
+ }
+
+ pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
+ engine->name,
+ ce[0]->ring->tail, ce[0]->ring->emit,
+ ce[1]->ring->tail, ce[1]->ring->emit);
+
+err_ce:
+ intel_engine_flush_submission(engine);
+ igt_spinner_end(spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that we roll back large chunks of a ring in order to do a
+ * preemption event. Similar to live_unlite_ring, but looking at
+ * ring size rather than the impact of intel_ring_direction().
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ int n;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
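+ /* Sweep the pre-filled queue depth from 0 up to 3/4 of the 4K ring */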
+ for (n = 0; n <= 3; n++) {
+ err = __live_preempt_ring(engine, &spin,
+ n * SZ_4K / 4, SZ_4K);
+ if (err)
+ break;
+ }
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = { .priority = prio++ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->sched_engine->schedule(rq, &attr);
+ } while (prio <= I915_PRIORITY_MAX &&
+ !__igt_timeout(end_time, NULL));
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * Such that the last spinner is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n = list_next_entry(rq, mock.link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_gpr_user(struct intel_engine_cs *engine,
+ struct i915_vma *result,
+ unsigned int offset)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+ int i;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, result->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(vma);
+ return ERR_CAST(cs);
+ }
+
+ /* All GPRs are clear for new contexts. We use GPR(0) as a constant */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, 0);
+ *cs++ = 1;
+
+ for (i = 1; i < NUM_GPR; i++) {
+ u64 addr;
+
+ /*
+ * Perform: GPR[i]++
+ *
+ * As we read and write into the context saved GPR[i], if
+ * we restart this batch buffer from an earlier point, we
+ * will repeat the increment and store a value > 1.
+ */
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
+
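+ /*
+ * Store the low dword of GPR[i] (the GPR are 64b wide, hence the
+ * 2 * i register offset) into this client's slot of the result.
+ */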
+ addr = result->node.start + offset + i * sizeof(*cs);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = CS_GPR(engine, 2 * i);
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
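+ /*
+ * Wait for the preempting client to raise the global semaphore to
+ * at least i before the next increment, pacing each step of the
+ * batch against the stream of preemption events.
+ */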
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = i;
+ *cs++ = lower_32_bits(result->node.start);
+ *cs++ = upper_32_bits(result->node.start);
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+}
+
+static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
+ if (err) {
+ i915_vma_put(vma);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_request *
+create_gpr_client(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ unsigned int offset)
+{
+ struct i915_vma *batch, *vma;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ vma = i915_vma_instance(global->obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_ce;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_ce;
+
+ batch = create_gpr_user(engine, vma, offset);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_vma;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+
+ i915_vma_lock(batch);
+ if (!err)
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(batch);
+ i915_vma_unpin(batch);
+
+ if (!err)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+out_batch:
+ i915_vma_put(batch);
+out_vma:
+ i915_vma_unpin(vma);
+out_ce:
+ intel_context_put(ce);
+ return err ? ERR_PTR(err) : rq;
+}
+
+static int preempt_user(struct intel_engine_cs *engine,
+ struct i915_vma *global,
+ int id)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_PRIORITY_MAX
+ };
+ struct i915_request *rq;
+ int err = 0;
+ u32 *cs;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
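+ /* Publish the client id to the global semaphore, releasing the next GPR step */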
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(global);
+ *cs++ = 0;
+ *cs++ = id;
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ engine->sched_engine->schedule(rq, &attr);
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_preempt_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *global;
+ enum intel_engine_id id;
+ u32 *result;
+ int err = 0;
+
+ /*
+ * In our other tests, we look at preemption in carefully
+ * controlled conditions in the ringbuffer. Since most of the
+ * time is spent in user batches, most of our preemptions naturally
+ * occur there. We want to verify that when we preempt inside a batch
+ * we continue on from the current instruction and do not roll back
+ * to the start, or another earlier arbitration point.
+ *
+ * To verify this, we create a batch which is a mixture of
+ * MI_MATH (gpr++), MI_SRM (gpr) and preemption points. Then with
+ * a few preempting contexts thrown into the mix, we look for any
+ * repeated instructions (which show up as incorrect values).
+ */
+
+ global = create_global(gt, 4096);
+ if (IS_ERR(global))
+ return PTR_ERR(global);
+
+ result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ i915_vma_unpin_and_release(&global, 0);
+ return PTR_ERR(result);
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *client[3] = {};
+ struct igt_live_test t;
+ int i;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
+ continue; /* we need per-context GPR */
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ memset(result, 0, 4096);
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_request *rq;
+
+ rq = create_gpr_client(engine, global,
+ NUM_GPR * i * sizeof(u32));
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end_test;
+ }
+
+ client[i] = rq;
+ }
+
+ /* Continuously preempt the set of 3 running contexts */
+ for (i = 1; i <= NUM_GPR; i++) {
+ err = preempt_user(engine, global, i);
+ if (err)
+ goto end_test;
+ }
+
+ if (READ_ONCE(result[0]) != NUM_GPR) {
+ pr_err("%s: Failed to release semaphore\n",
+ engine->name);
+ err = -EIO;
+ goto end_test;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ int gpr;
+
+ if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
+ err = -ETIME;
+ goto end_test;
+ }
+
+ for (gpr = 1; gpr < NUM_GPR; gpr++) {
+ if (result[NUM_GPR * i + gpr] != 1) {
+ pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
+ engine->name,
+ i, gpr, result[NUM_GPR * i + gpr]);
+ err = -EINVAL;
+ goto end_test;
+ }
+ }
+ }
+
+end_test:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i])
+ break;
+
+ i915_request_put(client[i]);
+ }
+
+ /* Flush the semaphores on error */
+ smp_store_mb(result[0], -1);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ ctx_hi = kernel_context(gt->i915, NULL);
+ if (!ctx_hi)
+ return -ENOMEM;
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+ ctx_lo = kernel_context(gt->i915, NULL);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_ctx_lo;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_spin_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_spin_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
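+ /*
+ * The high priority request can only complete if the non-preemptible
+ * spinner was evicted by the preemption timeout firing.
+ */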
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_spin_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+ return err;
+}
+
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+ struct intel_gt *gt;
+ struct kthread_work work;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+ int result;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_eb_vm(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = igt_request_alloc(ctx, smoke->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static void smoke_crescendo_work(struct kthread_work *work)
+{
+ struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
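+ /* Ramp the priority with each submission, wrapping at I915_PRIORITY_MAX */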
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+
+ smoke->result = smoke_submit(smoke, ctx,
+ count % I915_PRIORITY_MAX,
+ smoke->batch);
+
+ count++;
+ } while (!smoke->result && count < smoke->ncontext &&
+ !__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct kthread_worker *worker[I915_NUM_ENGINES] = {};
+ struct preempt_smoke *arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+
+ memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
+
+ for_each_engine(engine, smoke->gt, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+ if (IS_ERR(worker[id])) {
+ err = PTR_ERR(worker[id]);
+ break;
+ }
+
+ kthread_init_work(&arg[id].work, smoke_crescendo_work);
+ kthread_queue_work(worker[id], &arg[id].work);
+ }
+
+ count = 0;
+ for_each_engine(engine, smoke->gt, id) {
+ if (IS_ERR_OR_NULL(worker[id]))
+ continue;
+
+ kthread_flush_work(&arg[id].work);
+ if (arg[id].result && !err)
+ err = arg[id].result;
+
+ count += arg[id].count;
+
+ kthread_destroy_worker(worker[id]);
+ }
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+
+ kfree(arg);
+ return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->gt, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+ return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .gt = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 256,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ struct igt_live_test t;
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
+ i915_gem_object_unpin_map(smoke.batch);
+
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
+ err = -EIO;
+ goto err_batch;
+ }
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_free:
+ kfree(smoke.contexts);
+
+ return err;
+}
+
+static int nop_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16] = {};
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
+
+ for (n = 0; n < nctx; n++) {
+ ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ve[n])) {
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
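+ /*
+ * With CHAIN we submit all requests for one context before moving
+ * on to the next; otherwise we interleave one request per context
+ * on each pass.
+ */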
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ }
+ return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ bool (*filter)(const struct intel_engine_cs *))
+{
+ unsigned int n = 0;
+ unsigned int inst;
+
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ if (filter && !filter(gt->engine_class[class][inst]))
+ continue;
+
+ siblings[n++] = gt->engine_class[class][inst];
+ }
+
+ return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+ unsigned int class,
+ struct intel_engine_cs **siblings)
+{
+ return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ return err;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(gt, siblings, nsibling,
+ n, 0);
+ if (err)
+ return err;
+ }
+
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must take part in timeslicing on the target engines.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+ __func__, rq->engine->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ const long timeout = slice_timeout(siblings[0]);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct igt_spinner spin;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * Virtual requests must allow others a fair timeslice.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ /* XXX We do not handle oversubscription and fairness with normal rq */
+ for (n = 0; n < nsibling; n++) {
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_add(rq);
+ }
+
+ for (n = 0; !err && n < nsibling; n++) {
+ ce = intel_context_create(siblings[n]);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0) {
+ GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+ __func__, siblings[n]->name);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ }
+
+out:
+ igt_spinner_end(&spin);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+ int err;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = __select_siblings(gt, class, siblings,
+ intel_engine_has_timeslices);
+ if (nsibling < 2)
+ continue;
+
+ err = slicein_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+
+ err = sliceout_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ scratch =
+ __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * registers are preserved.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ /* As we use CS_GPR we cannot run before they existed on all engines. */
+ if (GRAPHICS_VER(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_disable(siblings[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ err = engine_lock_reset_tasklet(engine);
+ if (err)
+ goto out_heartbeat;
+
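+ /*
+ * Run the submission tasklet by hand (it is disabled while we hold
+ * the reset) so that the spinner is promoted into the ELSP before we
+ * fake the preemption and reset below.
+ */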
+ engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->sched_engine->lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->sched_engine->lock);
+ GEM_BUG_ON(rq->engine != engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ __intel_engine_reset_bh(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ engine_unlock_reset_tasklet(engine);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ st_engine_heartbeat_enable(siblings[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = select_siblings(gt, class, siblings);
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
+ SUBTEST(live_unlite_ring),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
+ SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
+ SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
+ SUBTEST(live_busywait_preempt),
+ SUBTEST(live_preempt),
+ SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_ring),
+ SUBTEST(live_preempt_gang),
+ SUBTEST(live_preempt_timeout),
+ SUBTEST(live_preempt_user),
+ SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
+ SUBTEST(live_virtual_slice),
+ SUBTEST(live_virtual_reset),
+ };
+
+ if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
+ return 0;
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
new file mode 100644
index 000000000..be94f863b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "intel_engine_regs.h"
+#include "intel_gt_clock_utils.h"
+
+#include "selftest_llc.h"
+#include "selftest_rc6.h"
+#include "selftest_rps.h"
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
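+ /*
+ * Sample RING_TIMESTAMP against ktime across a ~1ms busy-wait with
+ * interrupts disabled, five times, so that the caller can compare the
+ * CS clock against walltime.
+ */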
+static void measure_clocks(struct intel_engine_cs *engine,
+ u32 *out_cycles, ktime_t *out_dt)
+{
+ ktime_t dt[5];
+ u32 cycles[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ local_irq_disable();
+ cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ dt[i] = ktime_get();
+
+ udelay(1000);
+
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
+ cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
+ local_irq_enable();
+ }
+
+ /* Average the three central samples of both cycles/dt, discarding the outliers; close enough to the median */
+ sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
+ *out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;
+
+ sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
+ *out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
+}
+
+static int live_gt_clocks(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!gt->clock_frequency) { /* unknown */
+ pr_info("CS_TIMESTAMP frequency unknown\n");
+ return 0;
+ }
+
+ if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
+ return 0;
+
+ if (GRAPHICS_VER(gt->i915) == 5)
+ /*
+ * XXX CS_TIMESTAMP low dword is dysfunctional?
+ *
+ * Ville's experiments indicate the high dword still works,
+ * but at a correspondingly reduced frequency.
+ */
+ return 0;
+
+ if (GRAPHICS_VER(gt->i915) == 4)
+ /*
+ * XXX CS_TIMESTAMP appears gibberish
+ *
+ * Ville's experiments indicate that it mostly appears 'stuck'
+ * in that we see the register report the same cycle count
+ * for a couple of reads.
+ */
+ return 0;
+
+ intel_gt_pm_get(gt);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ u32 cycles;
+ u32 expected;
+ u64 time;
+ u64 dt;
+
+ if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
+ continue;
+
+ measure_clocks(engine, &cycles, &dt);
+
+ time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+ expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
+
+ pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ engine->gt->clock_frequency / 1000);
+
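+ /*
+ * Tolerate up to a 9:8 (~12.5%) disagreement, in either direction,
+ * between the converted CS cycles and the measured walltime.
+ */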
+ if (9 * time < 8 * dt || 8 * time > 9 * dt) {
+ pr_err("%s: CS ticks did not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
+ pr_err("%s: walltime did not match CS ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+static int live_gt_resume(void *arg)
+{
+ struct intel_gt *gt = arg;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Do several suspend/resume cycles to check we don't explode! */
+ do {
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ if (gt->rc6.enabled) {
+ pr_err("rc6 still enabled after suspend!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = intel_gt_resume(gt);
+ if (err)
+ break;
+
+ if (gt->rc6.supported && !gt->rc6.enabled) {
+ pr_err("rc6 not enabled upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = st_llc_verify(&gt->llc);
+ if (err) {
+ pr_err("llc state not restored upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_clocks),
+ SUBTEST(live_rc6_manual),
+ SUBTEST(live_rps_clock_interval),
+ SUBTEST(live_rps_control),
+ SUBTEST(live_rps_frequency_cs),
+ SUBTEST(live_rps_frequency_srm),
+ SUBTEST(live_rps_power),
+ SUBTEST(live_rps_interrupt),
+ SUBTEST(live_rps_dynamic),
+ SUBTEST(live_gt_resume),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
+
+int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ /*
+ * These tests may leave the system in an undesirable state.
+ * They are intended to be run last in CI and the system
+ * rebooted afterwards.
+ */
+ SUBTEST(live_rc6_ctx_wa),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
new file mode 100644
index 000000000..71263058a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -0,0 +1,2060 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include <linux/kthread.h>
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+
+#include "i915_gem_evict.h"
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "selftest_engine_heartbeat.h"
+
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_atomic.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
+
+#include "selftests/mock_drm.h"
+
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/igt_gem_utils.h"
+
+#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
+
+struct hang {
+ struct intel_gt *gt;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ u32 *seqno;
+ u32 *batch;
+};
+
+static int hang_init(struct hang *h, struct intel_gt *gt)
+{
+ void *vaddr;
+ int err;
+
+ memset(h, 0, sizeof(*h));
+ h->gt = gt;
+
+ h->ctx = kernel_context(gt->i915, NULL);
+ if (IS_ERR(h->ctx))
+ return PTR_ERR(h->ctx);
+
+ GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
+ h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(h->hws)) {
+ err = PTR_ERR(h->hws);
+ goto err_ctx;
+ }
+
+ h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(h->obj)) {
+ err = PTR_ERR(h->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ vaddr = i915_gem_object_pin_map_unlocked(h->obj,
+ i915_coherent_map_type(gt->i915, h->obj, false));
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ h->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(h->hws);
+err_obj:
+ i915_gem_object_put(h->obj);
+err_hws:
+ i915_gem_object_put(h->hws);
+err_ctx:
+ kernel_context_close(h->ctx);
+ return err;
+}
+
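+ /*
+ * Each context is assigned its own dword within the shared HWS page,
+ * keyed by the fence context, into which its batch writes the seqno.
+ */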
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+}
+
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = h->gt;
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq = NULL;
+ struct i915_vma *hws, *vma;
+ unsigned int flags;
+ void *vaddr;
+ u32 *batch;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ i915_vm_put(vm);
+ return ERR_CAST(obj);
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ i915_vm_put(vm);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ h->obj = obj;
+ h->batch = vaddr;
+
+ vma = i915_vma_instance(h->obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_vm_put(vm);
+ return ERR_CAST(vma);
+ }
+
+ hws = i915_vma_instance(h->hws, vm, NULL);
+ if (IS_ERR(hws)) {
+ i915_vm_put(vm);
+ return ERR_CAST(hws);
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vm_put(vm);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ rq = igt_request_alloc(h->ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin_hws;
+ }
+
+ err = move_to_active(vma, rq, 0);
+ if (err)
+ goto cancel_rq;
+
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
+
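+ /*
+ * Emit a batch that writes its seqno to the HWS and then branches back
+ * to its own start, spinning indefinitely. The hang is broken by
+ * rewriting the first dword with MI_BATCH_BUFFER_END (see hang_fini()),
+ * which the loop picks up on its next pass.
+ */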
+ batch = h->batch;
+ if (GRAPHICS_VER(gt->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_NOOP;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_NOOP;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ } else if (GRAPHICS_VER(gt->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_NOOP;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_NOOP;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else if (GRAPHICS_VER(gt->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_NOOP;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_NOOP;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_NOOP;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_NOOP;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
+ *batch++ = lower_32_bits(vma->node.start);
+ }
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+ intel_gt_chipset_flush(engine->gt);
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
+ flags = 0;
+ if (GRAPHICS_VER(gt->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+
+cancel_rq:
+ if (err) {
+ i915_request_set_error_once(rq, err);
+ i915_request_add(rq);
+ }
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ i915_vm_put(vm);
+ return err ? ERR_PTR(err) : rq;
+}
+
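+ /* Read back the last seqno the hanging batch wrote for this context */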
+static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
+{
+ return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
+}
+
+static void hang_fini(struct hang *h)
+{
+ *h->batch = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(h->gt);
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ i915_gem_object_unpin_map(h->hws);
+ i915_gem_object_put(h->hws);
+
+ kernel_context_close(h->ctx);
+
+ igt_flush_test(h->gt->i915);
+}
+
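+ /*
+ * Wait for the hanging batch to report its seqno to the HWS: a short
+ * 10us busy-wait followed by up to a second of sleeping polls.
+ */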
+static bool wait_until_running(struct hang *h, struct i915_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
+static int igt_hang_sanitycheck(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_request *rq;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Basic check that we can execute our hanging batch */
+
+ err = hang_init(&h, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_wedge_me w;
+ long timeout;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("Failed to create request for %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+
+ i915_request_get(rq);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(engine->gt);
+
+ i915_request_add(rq);
+
+ timeout = 0;
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
+ timeout = i915_request_wait(rq, 0,
+ MAX_SCHEDULE_TIMEOUT);
+ if (intel_gt_is_wedged(gt))
+ timeout = -EIO;
+
+ i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("Wait for request failed on %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+ }
+
+fini:
+ hang_fini(&h);
+ return err;
+}
+
+static bool wait_for_idle(struct intel_engine_cs *engine)
+{
+ return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
+}
+
+static int igt_reset_nop(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ unsigned int reset_count, count;
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ /* Check that we can reset during non-user portions of requests */
+
+ reset_count = i915_reset_count(global);
+ count = 0;
+ do {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ pr_err("[%s] Create context failed: %d!\n", engine->name, err);
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create request failed: %d!\n",
+ engine->name, err);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+
+ intel_context_put(ce);
+ }
+
+ igt_global_reset_lock(gt);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(gt);
+
+ if (intel_gt_is_wedged(gt)) {
+ pr_err("[%s] GT is wedged!\n", engine->name);
+ err = -EIO;
+ break;
+ }
+
+ if (i915_reset_count(global) != reset_count + ++count) {
+ pr_err("[%s] Reset not recorded: %d vs %d + %d!\n",
+ engine->name, i915_reset_count(global), reset_count, count);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_flush_test(gt->i915);
+ if (err) {
+ pr_err("[%s] Flush failed: %d!\n", engine->name, err);
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: %d resets\n", __func__, count);
+
+ if (igt_flush_test(gt->i915)) {
+ pr_err("Post flush failed: %d!\n", err);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can engine-reset during non-user portions */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int reset_count, reset_engine_count, count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ if (intel_engine_uses_guc(engine)) {
+ /*
+ * Engine level resets are triggered by GuC when a hang
+ * is detected. They can't be triggered by the KMD any
+ * more. Thus a nop batch cannot be used as a reset test.
+ */
+ continue;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
+ return PTR_ERR(ce);
+ }
+
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
+ count = 0;
+
+ st_engine_heartbeat_disable(engine);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
+ do {
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ break;
+ }
+
+ if (i915_reset_count(global) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(global, engine) !=
+ reset_engine_count + ++count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+ intel_context_put(ce);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
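+ /*
+ * Arm the selftest fault injection so that the next engine reset
+ * appears to time out (-ETIMEDOUT); this is only wired up for the
+ * gen8+ reset path.
+ */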
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+ engine->reset_timeout.probability = 999;
+ atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+ memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Check that we can recover from engine-reset failures */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ unsigned int count;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Can't manually break the reset if i915 doesn't perform it */
+ if (intel_engine_uses_guc(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
+ return PTR_ERR(ce);
+ }
+
+ st_engine_heartbeat_disable(engine);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
+
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err == 0) /* timeouts only generated on gen8+ */
+ goto skip;
+
+ count = 0;
+ do {
+ struct i915_request *last = NULL;
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < count % 15; i++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to submit request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ if (last)
+ i915_request_put(last);
+
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ if (count & 1) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ GEM_TRACE_DUMP();
+ i915_request_put(last);
+ break;
+ }
+ } else {
+ force_reset_timeout(engine);
+ err = intel_engine_reset(engine, NULL);
+ cancel_reset_timeout(engine);
+ if (err != -ETIMEDOUT) {
+ pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+ engine->name, err);
+ i915_request_put(last);
+ break;
+ }
+ }
+
+ err = 0;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ / 2) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ intel_engine_dump(engine, &p,
+ "%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+
+ GEM_TRACE("%s(%s): failed to complete request\n",
+ __func__,
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EIO;
+ }
+ i915_request_put(last);
+ }
+ count++;
+ } while (err == 0 && time_before(jiffies, end_time));
+out:
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ intel_context_put(ce);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __igt_reset_engine(struct intel_gt *gt, bool active)
+{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err = 0;
+
+ /* Check that we can issue an engine reset on an idle engine (no-op) */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (active) {
+ err = hang_init(&h, gt);
+ if (err)
+ return err;
+ }
+
+ for_each_engine(engine, gt, id) {
+ unsigned int reset_count, reset_engine_count;
+ unsigned long count;
+ bool using_guc = intel_engine_uses_guc(engine);
+ IGT_TIMEOUT(end_time);
+
+ if (using_guc && !active)
+ continue;
+
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
+
+ st_engine_heartbeat_disable(engine);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
+ count = 0;
+ do {
+ struct i915_request *rq = NULL;
+ struct intel_selftest_saved_policy saved;
+ int err2;
+
+ err = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+ if (err) {
+ pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+ break;
+ }
+
+ if (active) {
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n",
+ engine->name, err);
+ goto restore;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to start request %llx, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ err = -EIO;
+ goto restore;
+ }
+ }
+
+ if (!using_guc) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed, err:%d\n",
+ engine->name, err);
+ goto skip;
+ }
+ }
+
+ if (rq) {
+ /* Ensure the reset happens and kills the engine */
+ err = intel_selftest_wait_for_rq(rq);
+ if (err)
+ pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
+ engine->name, rq->fence.context,
+ rq->fence.seqno, rq->context->guc_id.id, err);
+ }
+
+skip:
+ if (rq)
+ i915_request_put(rq);
+
+ if (i915_reset_count(global) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ goto restore;
+ }
+
+ /* GuC based resets are not logged per engine */
+ if (!using_guc) {
+ if (i915_reset_engine_count(global, engine) !=
+ ++reset_engine_count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ goto restore;
+ }
+ }
+
+ count++;
+
+restore:
+ err2 = intel_selftest_restore_policy(engine, &saved);
+ if (err2)
+ pr_err("[%s] Restore policy failed: %d!\n", engine->name, err);
+ if (err == 0)
+ err = err2;
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable(engine);
+ pr_info("%s: Completed %lu %s resets\n",
+ engine->name, count, active ? "active" : "idle");
+
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err) {
+ pr_err("[%s] Flush failed: %d!\n", engine->name, err);
+ break;
+ }
+ }
+
+ if (intel_gt_is_wedged(gt)) {
+ pr_err("GT is wedged!\n");
+ err = -EIO;
+ }
+
+ if (active)
+ hang_fini(&h);
+
+ return err;
+}
+
+static int igt_reset_idle_engine(void *arg)
+{
+ return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+ return __igt_reset_engine(arg, true);
+}
+
+struct active_engine {
+ struct kthread_worker *worker;
+ struct kthread_work work;
+ struct intel_engine_cs *engine;
+ unsigned long resets;
+ unsigned int flags;
+ bool stop;
+ int result;
+};
+
+#define TEST_ACTIVE BIT(0)
+#define TEST_OTHERS BIT(1)
+#define TEST_SELF BIT(2)
+#define TEST_PRIORITY BIT(3)
+
+static int active_request_put(struct i915_request *rq)
+{
+ int err = 0;
+
+ if (!rq)
+ return 0;
+
+ if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(rq->engine->gt);
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
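+ /*
+ * Background worker: keep a ring of eight requests in flight on its
+ * engine (optionally with randomised priorities) until told to stop,
+ * so we can check that resets elsewhere do not disturb it.
+ */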
+static void active_engine(struct kthread_work *work)
+{
+ I915_RND_STATE(prng);
+ struct active_engine *arg = container_of(work, typeof(*arg), work);
+ struct intel_engine_cs *engine = arg->engine;
+ struct i915_request *rq[8] = {};
+ struct intel_context *ce[ARRAY_SIZE(rq)];
+ unsigned long count;
+ int err = 0;
+
+ for (count = 0; count < ARRAY_SIZE(ce); count++) {
+ ce[count] = intel_context_create(engine);
+ if (IS_ERR(ce[count])) {
+ arg->result = PTR_ERR(ce[count]);
+ pr_err("[%s] Create context #%ld failed: %d!\n",
+ engine->name, count, arg->result);
+ while (count--)
+ intel_context_put(ce[count]);
+ return;
+ }
+ }
+
+ count = 0;
+ while (!READ_ONCE(arg->stop)) {
+ unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
+ struct i915_request *old = rq[idx];
+ struct i915_request *new;
+
+ new = intel_context_create_request(ce[idx]);
+ if (IS_ERR(new)) {
+ err = PTR_ERR(new);
+ pr_err("[%s] Create request #%d failed: %d!\n", engine->name, idx, err);
+ break;
+ }
+
+ rq[idx] = i915_request_get(new);
+ i915_request_add(new);
+
+ if (engine->sched_engine->schedule && arg->flags & TEST_PRIORITY) {
+ struct i915_sched_attr attr = {
+ .priority =
+ i915_prandom_u32_max_state(512, &prng),
+ };
+ engine->sched_engine->schedule(rq[idx], &attr);
+ }
+
+ err = active_request_put(old);
+ if (err) {
+ pr_err("[%s] Request put failed: %d!\n", engine->name, err);
+ break;
+ }
+
+ cond_resched();
+ }
+
+ for (count = 0; count < ARRAY_SIZE(rq); count++) {
+ int err__ = active_request_put(rq[count]);
+
+ if (err__)
+ pr_err("[%s] Request put #%ld failed: %d!\n", engine->name, count, err__);
+
+ /* Keep the first error */
+ if (!err)
+ err = err__;
+
+ intel_context_put(ce[count]);
+ }
+
+ arg->result = err;
+}
+
+static int __igt_reset_engines(struct intel_gt *gt,
+ const char *test_name,
+ unsigned int flags)
+{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine, *other;
+ struct active_engine *threads;
+ enum intel_engine_id id, tmp;
+ struct hang h;
+ int err = 0;
+
+ /* Check that issuing a reset on one engine does not interfere
+ * with any other engine.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (flags & TEST_ACTIVE) {
+ err = hang_init(&h, gt);
+ if (err)
+ return err;
+
+ if (flags & TEST_PRIORITY)
+ h.ctx->sched.priority = 1024;
+ }
+
+ threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ unsigned long device = i915_reset_count(global);
+ unsigned long count = 0, reported;
+ bool using_guc = intel_engine_uses_guc(engine);
+ IGT_TIMEOUT(end_time);
+
+ if (flags & TEST_ACTIVE) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+ } else if (using_guc)
+ continue;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
+ engine->name, test_name);
+ err = -EIO;
+ break;
+ }
+
+ memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
+ for_each_engine(other, gt, tmp) {
+ struct kthread_worker *worker;
+
+ threads[tmp].resets =
+ i915_reset_engine_count(global, other);
+
+ if (other == engine && !(flags & TEST_SELF))
+ continue;
+
+ if (other != engine && !(flags & TEST_OTHERS))
+ continue;
+
+ threads[tmp].engine = other;
+ threads[tmp].flags = flags;
+
+ worker = kthread_create_worker(0, "igt/%s",
+ other->name);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ pr_err("[%s] Worker create failed: %d!\n",
+ engine->name, err);
+ goto unwind;
+ }
+
+ threads[tmp].worker = worker;
+
+ kthread_init_work(&threads[tmp].work, active_engine);
+ kthread_queue_work(threads[tmp].worker,
+ &threads[tmp].work);
+ }
+
+ st_engine_heartbeat_disable_no_pm(engine);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
+ do {
+ struct i915_request *rq = NULL;
+ struct intel_selftest_saved_policy saved;
+ int err2;
+
+ err = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+ if (err) {
+ pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+ break;
+ }
+
+ if (flags & TEST_ACTIVE) {
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n",
+ engine->name, err);
+ goto restore;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to start request %llx, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ err = -EIO;
+ goto restore;
+ }
+ } else {
+ intel_engine_pm_get(engine);
+ }
+
+ if (!using_guc) {
+ err = intel_engine_reset(engine, NULL);
+ if (err) {
+ pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
+ engine->name, test_name, err);
+ goto restore;
+ }
+ }
+
+ if (rq) {
+ /* Ensure the reset happens and kills the engine */
+ err = intel_selftest_wait_for_rq(rq);
+ if (err)
+ pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
+ engine->name, rq->fence.context,
+ rq->fence.seqno, rq->context->guc_id.id, err);
+ }
+
+ count++;
+
+ if (rq) {
+ if (rq->fence.error != -EIO) {
+ pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno, rq->context->guc_id.id);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto restore;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to complete request %llx:%lld after reset\n",
+ engine->name, test_name,
+ rq->fence.context,
+ rq->fence.seqno);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto restore;
+ }
+
+ i915_request_put(rq);
+ }
+
+ if (!(flags & TEST_ACTIVE))
+ intel_engine_pm_put(engine);
+
+ if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to idle after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ goto restore;
+ }
+
+restore:
+ err2 = intel_selftest_restore_policy(engine, &saved);
+ if (err2)
+ pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
+ if (err == 0)
+ err = err2;
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ st_engine_heartbeat_enable_no_pm(engine);
+
+ pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+ engine->name, test_name, count);
+
+ /* GuC based resets are not logged per engine */
+ if (!using_guc) {
+ reported = i915_reset_engine_count(global, engine);
+ reported -= threads[engine->id].resets;
+ if (reported != count) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, test_name, count, reported);
+ if (!err)
+ err = -EINVAL;
+ }
+ }
+
+unwind:
+ for_each_engine(other, gt, tmp) {
+ int ret;
+
+ if (!threads[tmp].worker)
+ continue;
+
+ WRITE_ONCE(threads[tmp].stop, true);
+ kthread_flush_work(&threads[tmp].work);
+ ret = READ_ONCE(threads[tmp].result);
+ if (ret) {
+ pr_err("kthread for other engine %s failed, err=%d\n",
+ other->name, ret);
+ if (!err)
+ err = ret;
+ }
+
+ kthread_destroy_worker(threads[tmp].worker);
+
+ /* GuC based resets are not logged per engine */
+ if (!using_guc) {
+ if (other->uabi_class != engine->uabi_class &&
+ threads[tmp].resets !=
+ i915_reset_engine_count(global, other)) {
+ pr_err("Innocent engine %s was reset (count=%ld)\n",
+ other->name,
+ i915_reset_engine_count(global, other) -
+ threads[tmp].resets);
+ if (!err)
+ err = -EINVAL;
+ }
+ }
+ }
+
+ if (device != i915_reset_count(global)) {
+ pr_err("Global reset (count=%ld)!\n",
+ i915_reset_count(global) - device);
+ if (!err)
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915);
+ if (err) {
+ pr_err("[%s] Flush failed: %d!\n", engine->name, err);
+ break;
+ }
+ }
+ kfree(threads);
+
+ if (intel_gt_is_wedged(gt))
+ err = -EIO;
+
+ if (flags & TEST_ACTIVE)
+ hang_fini(&h);
+
+ return err;
+}
+
+static int igt_reset_engines(void *arg)
+{
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "idle", 0 },
+ { "active", TEST_ACTIVE },
+ { "others-idle", TEST_OTHERS },
+ { "others-active", TEST_OTHERS | TEST_ACTIVE },
+ {
+ "others-priority",
+ TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
+ },
+ {
+ "self-priority",
+ TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
+ },
+ { }
+ };
+ struct intel_gt *gt = arg;
+ typeof(*phases) *p;
+ int err;
+
+ for (p = phases; p->name; p++) {
+ if (p->flags & TEST_PRIORITY) {
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ continue;
+ }
+
+ err = __igt_reset_engines(arg, p->name, p->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
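+ /*
+ * Pretend hangcheck fired: perform the GT reset directly and hand back
+ * the prior global reset count so callers can verify it advanced.
+ */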
+static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
+{
+ u32 count = i915_reset_count(&gt->i915->gpu_error);
+
+ intel_gt_reset(gt, mask, NULL);
+
+ return count;
+}
+
+static int igt_reset_wait(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ struct i915_request *rq;
+ unsigned int reset_count;
+ struct hang h;
+ long timeout;
+ int err;
+
+ engine = intel_selftest_find_any_engine(gt);
+
+ if (!engine || !intel_engine_can_store_dword(engine))
+ return 0;
+
+ /* Check that we detect a stuck waiter and issue a reset */
+
+ igt_global_reset_lock(gt);
+
+ err = hang_init(&h, gt);
+ if (err) {
+ pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+ goto unlock;
+ }
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+ goto fini;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to start request %llx, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ intel_gt_set_wedged(gt);
+
+ err = -EIO;
+ goto out_rq;
+ }
+
+ reset_count = fake_hangcheck(gt, ALL_ENGINES);
+
+ timeout = i915_request_wait(rq, 0, 10);
+ if (timeout < 0) {
+ pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
+ timeout);
+ err = timeout;
+ goto out_rq;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+out_rq:
+ i915_request_put(rq);
+fini:
+ hang_fini(&h);
+unlock:
+ igt_global_reset_unlock(gt);
+
+ if (intel_gt_is_wedged(gt))
+ return -EIO;
+
+ return err;
+}
+
+struct evict_vma {
+ struct completion completion;
+ struct i915_vma *vma;
+};
+
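+ /*
+ * Run from a kthread: try to evict the node pinned busy by the hanging
+ * batch. The eviction must wait on that request and so can only make
+ * progress once the GPU has been reset.
+ */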
+static int evict_vma(void *data)
+{
+ struct evict_vma *arg = data;
+ struct i915_address_space *vm = arg->vma->vm;
+ struct drm_mm_node evict = arg->vma->node;
+ int err;
+
+ complete(&arg->completion);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
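+ /*
+ * Run from a kthread: exercise fence register (re)assignment on the
+ * vma kept busy by the hanging batch, which should stall until the
+ * reset completes.
+ */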
+static int evict_fence(void *data)
+{
+ struct evict_vma *arg = data;
+ int err;
+
+ complete(&arg->completion);
+
+ /* Mark the fence register as dirty to force the mmio update. */
+ err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
+ if (err) {
+ pr_err("Invalid Y-tiling settings; err:%d\n", err);
+ return err;
+ }
+
+ err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
+ if (err) {
+ pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
+ return err;
+ }
+
+ err = i915_vma_pin_fence(arg->vma);
+ i915_vma_unpin(arg->vma);
+ if (err) {
+ pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
+ return err;
+ }
+
+ i915_vma_unpin_fence(arg->vma);
+
+ return 0;
+}
+
+static int __igt_reset_evict_vma(struct intel_gt *gt,
+ struct i915_address_space *vm,
+ int (*fn)(void *),
+ unsigned int flags)
+{
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct task_struct *tsk = NULL;
+ struct i915_request *rq;
+ struct evict_vma arg;
+ struct hang h;
+ unsigned int pin_flags;
+ int err;
+
+ if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+ return 0;
+
+ engine = intel_selftest_find_any_engine(gt);
+
+ if (!engine || !intel_engine_can_store_dword(engine))
+ return 0;
+
+ /* Check that we can recover an unbind stuck on a hanging request */
+
+ err = hang_init(&h, gt);
+ if (err) {
+ pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+ return err;
+ }
+
+ obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("[%s] Create object failed: %d!\n", engine->name, err);
+ goto fini;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+ err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
+ if (err) {
+ pr_err("Invalid X-tiling settings; err:%d\n", err);
+ goto out_obj;
+ }
+ }
+
+ arg.vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(arg.vma)) {
+ err = PTR_ERR(arg.vma);
+ pr_err("[%s] VMA instance failed: %d!\n", engine->name, err);
+ goto out_obj;
+ }
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+ goto out_obj;
+ }
+
+ pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ pin_flags |= PIN_MAPPABLE;
+
+ err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
+ if (err) {
+ i915_request_add(rq);
+ pr_err("[%s] VMA pin failed: %d!\n", engine->name, err);
+ goto out_obj;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+ err = i915_vma_pin_fence(arg.vma);
+ if (err) {
+ pr_err("Unable to pin X-tiled fence; err:%d\n", err);
+ i915_vma_unpin(arg.vma);
+ i915_request_add(rq);
+ goto out_obj;
+ }
+ }
+
+ i915_vma_lock(arg.vma);
+ err = i915_request_await_object(rq, arg.vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0) {
+ err = i915_vma_move_to_active(arg.vma, rq, flags);
+ if (err)
+ pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
+ } else {
+ pr_err("[%s] Request await failed: %d!\n", engine->name, err);
+ }
+
+ i915_vma_unlock(arg.vma);
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_vma_unpin_fence(arg.vma);
+ i915_vma_unpin(arg.vma);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ goto out_rq;
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to start request %llx, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ intel_gt_set_wedged(gt);
+ goto out_reset;
+ }
+
+ init_completion(&arg.completion);
+
+ tsk = kthread_run(fn, &arg, "igt/evict_vma");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
+ tsk = NULL;
+ goto out_reset;
+ }
+ get_task_struct(tsk);
+
+ wait_for_completion(&arg.completion);
+
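+ /*
+ * The worker should now be blocked on the hanging request, which we
+ * detect by it having installed a callback on the request's fence.
+ */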
+ if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("igt/evict_vma kthread did not wait\n");
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ intel_gt_set_wedged(gt);
+ goto out_reset;
+ }
+
+out_reset:
+ igt_global_reset_lock(gt);
+ fake_hangcheck(gt, rq->engine->mask);
+ igt_global_reset_unlock(gt);
+
+ if (tsk) {
+ struct intel_wedge_me w;
+
+ /* The reset, even indirectly, should take less than 10ms. */
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
+ err = kthread_stop(tsk);
+
+ put_task_struct(tsk);
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_obj:
+ i915_gem_object_put(obj);
+fini:
+ hang_fini(&h);
+ if (intel_gt_is_wedged(gt))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+ /* aliasing == global gtt locking, covered above */
+ if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+ return 0;
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
+ i915_vm_put(&ppgtt->vm);
+
+ return err;
+}
+
+static int igt_reset_evict_fence(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
+ evict_fence, EXEC_OBJECT_NEEDS_FENCE);
+}
+
+static int wait_for_others(struct intel_gt *gt,
+ struct intel_engine_cs *exclude)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ if (engine == exclude)
+ continue;
+
+ if (!wait_for_idle(engine))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int igt_reset_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Check that we replay pending requests following a hang */
+
+ igt_global_reset_lock(gt);
+
+ err = hang_init(&h, gt);
+ if (err)
+ goto unlock;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_selftest_saved_policy saved;
+ struct i915_request *prev;
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ bool using_guc = intel_engine_uses_guc(engine);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (using_guc) {
+ err = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
+ if (err) {
+ pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+ goto fini;
+ }
+ }
+
+ prev = hang_create_request(&h, engine);
+ if (IS_ERR(prev)) {
+ err = PTR_ERR(prev);
+ pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
+ goto restore;
+ }
+
+ i915_request_get(prev);
+ i915_request_add(prev);
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+ unsigned int reset_count;
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+ goto restore;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * XXX We don't handle resetting the kernel context
+ * very well. If we trigger a device reset twice in
+ * quick succession while the kernel context is
+ * executing, we may end up skipping the breadcrumb.
+ * This is really only a problem for the selftest as
+ * normally there is a large interlude between resets
+ * (hangcheck), or we focus on resetting just one
+ * engine and so avoid repeatedly resetting innocents.
+ */
+ err = wait_for_others(gt, engine);
+ if (err) {
+ pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
+ __func__, engine->name);
+ i915_request_put(rq);
+ i915_request_put(prev);
+
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(gt);
+ goto restore;
+ }
+
+ if (!wait_until_running(&h, prev)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s(%s): Failed to start request %llx, at %x\n",
+ __func__, engine->name,
+ prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ i915_request_put(prev);
+
+ intel_gt_set_wedged(gt);
+
+ err = -EIO;
+ goto restore;
+ }
+
+ reset_count = fake_hangcheck(gt, BIT(id));
+
+ if (prev->fence.error != -EIO) {
+ pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
+ prev->fence.error);
+ i915_request_put(rq);
+ i915_request_put(prev);
+ err = -EINVAL;
+ goto restore;
+ }
+
+ if (rq->fence.error) {
+ pr_err("Fence error status not zero [%d] after unrelated reset\n",
+ rq->fence.error);
+ i915_request_put(rq);
+ i915_request_put(prev);
+ err = -EINVAL;
+ goto restore;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ i915_request_put(rq);
+ i915_request_put(prev);
+ err = -EINVAL;
+ goto restore;
+ }
+
+ i915_request_put(prev);
+ prev = rq;
+ count++;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: Completed %d queued resets\n",
+ engine->name, count);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(engine->gt);
+
+ i915_request_put(prev);
+
+restore:
+ if (using_guc) {
+ int err2 = intel_selftest_restore_policy(engine, &saved);
+
+ if (err2)
+ pr_err("%s:%d> [%s] Restore policy failed: %d!\n",
+ __func__, __LINE__, engine->name, err2);
+ if (err == 0)
+ err = err2;
+ }
+ if (err)
+ goto fini;
+
+ err = igt_flush_test(gt->i915);
+ if (err) {
+ pr_err("[%s] Flush failed: %d!\n", engine->name, err);
+ break;
+ }
+ }
+
+fini:
+ hang_fini(&h);
+unlock:
+ igt_global_reset_unlock(gt);
+
+ if (intel_gt_is_wedged(gt))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_handle_error(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ struct hang h;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+ int err;
+
+ engine = intel_selftest_find_any_engine(gt);
+
+ /* Check that we can issue a global GPU and engine reset */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (!engine || !intel_engine_can_store_dword(engine))
+ return 0;
+
+ err = hang_init(&h, gt);
+ if (err) {
+ pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+ return err;
+ }
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+ goto err_fini;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!wait_until_running(&h, rq)) {
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to start request %llx, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+ intel_gt_set_wedged(gt);
+
+ err = -EIO;
+ goto err_request;
+ }
+
+ /* Temporarily disable error capture */
+ error = xchg(&global->first_error, (void *)-1);
+
+ intel_gt_handle_error(gt, engine->mask, 0, NULL);
+
+ xchg(&global->first_error, error);
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Guilty request not identified!\n");
+ err = -EINVAL;
+ goto err_request;
+ }
+
+err_request:
+ i915_request_put(rq);
+err_fini:
+ hang_fini(&h);
+ return err;
+}
+
+static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct igt_atomic_section *p,
+ const char *mode)
+{
+ struct tasklet_struct * const t = &engine->sched_engine->tasklet;
+ int err;
+
+ GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
+ engine->name, mode, p->name);
+
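+ /*
+ * Keep the submission tasklet (and softirqs, unless that is the phase
+ * under test) from running while we perform the engine reset from
+ * within the atomic section.
+ */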
+ if (t->func)
+ tasklet_disable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
+ p->critical_section_begin();
+
+ err = __intel_engine_reset_bh(engine, NULL);
+
+ p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
+
+ if (err)
+ pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+ engine->name, mode, p->name);
+
+ return err;
+}
+
+static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct igt_atomic_section *p)
+{
+ struct i915_request *rq;
+ struct hang h;
+ int err;
+
+ err = __igt_atomic_reset_engine(engine, p, "idle");
+ if (err)
+ return err;
+
+ err = hang_init(&h, engine->gt);
+ if (err) {
+ pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
+ return err;
+ }
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (wait_until_running(&h, rq)) {
+ err = __igt_atomic_reset_engine(engine, p, "active");
+ } else {
+ pr_err("%s(%s): Failed to start request %llx, at %x\n",
+ __func__, engine->name,
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ }
+
+ if (err == 0) {
+ struct intel_wedge_me w;
+
+ intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (intel_gt_is_wedged(engine->gt))
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+out:
+ hang_fini(&h);
+ return err;
+}
+
+static int igt_reset_engines_atomic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ int err = 0;
+
+ /* Check that engine resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto unlock;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ err = igt_atomic_reset_engine(engine, p);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+unlock:
+ igt_global_reset_unlock(gt);
+
+ return err;
+}
+
+int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_reset_nop),
+ SUBTEST(igt_reset_nop_engine),
+ SUBTEST(igt_reset_idle_engine),
+ SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_fail_engine),
+ SUBTEST(igt_reset_engines),
+ SUBTEST(igt_reset_engines_atomic),
+ SUBTEST(igt_reset_queue),
+ SUBTEST(igt_reset_wait),
+ SUBTEST(igt_reset_evict_ggtt),
+ SUBTEST(igt_reset_evict_ppgtt),
+ SUBTEST(igt_reset_evict_fence),
+ SUBTEST(igt_handle_error),
+ };
+ struct intel_gt *gt = to_gt(i915);
+ intel_wakeref_t wakeref;
+ int err;
+
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
+ if (intel_gt_is_wedged(gt))
+ return -EIO; /* we're long past hope of a successful reset */
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ err = intel_gt_live_subtests(tests, gt);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
new file mode 100644
index 000000000..cfd736d88
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_pm.h" /* intel_gpu_freq() */
+#include "selftest_llc.h"
+#include "intel_rps.h"
+
+static int gen6_verify_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ intel_wakeref_t wakeref;
+ unsigned int gpu_freq;
+ int err = 0;
+
+ wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
+
+ if (!get_ia_constants(llc, &consts))
+ goto out_rpm;
+
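+	/*
+	 * For each GPU frequency, check that the IA and ring frequencies
+	 * stored in the pcode table match what calc_ia_freq() expects.
+	 */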
+ for (gpu_freq = consts.min_gpu_freq;
+ gpu_freq <= consts.max_gpu_freq;
+ gpu_freq++) {
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ unsigned int ia_freq, ring_freq, found;
+ u32 val;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+
+ val = gpu_freq;
+ if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &val, NULL)) {
+ pr_err("Failed to read freq table[%d], range [%d, %d]\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
+ err = -ENXIO;
+ break;
+ }
+
+ found = (val >> 0) & 0xff;
+ if (found != ia_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ia_freq);
+ err = -EINVAL;
+ break;
+ }
+
+ found = (val >> 8) & 0xff;
+ if (found != ring_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ring_freq);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
+ return err;
+}
+
+int st_llc_verify(struct intel_llc *llc)
+{
+ return gen6_verify_ring_freq(llc);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
new file mode 100644
index 000000000..88ee94800
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_LLC_H
+#define SELFTEST_LLC_H
+
+struct intel_llc;
+
+int st_llc_verify(struct intel_llc *llc);
+
+#endif /* SELFTEST_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
new file mode 100644
index 000000000..82d3f8058
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -0,0 +1,1977 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR 16
+#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+
+#define LRI_HEADER MI_INSTR(0x22, 0)
+#define LRI_LENGTH_MASK GENMASK(7, 0)
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
+}
+
+static bool is_active(struct i915_request *rq)
+{
+ if (i915_request_is_active(rq))
+ return true;
+
+ if (i915_request_on_hold(rq))
+ return true;
+
+ if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
+ return true;
+
+ return false;
+}
+
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ /* Ignore our own attempts to suppress excess tasklets */
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
+
+ timeout += jiffies;
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ if (i915_request_completed(rq)) /* that was quick! */
+ return 0;
+
+		/* Wait until the HW has acknowledged the submission (or err) */
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ return 0;
+
+ if (done)
+ return -ETIME;
+
+ cond_resched();
+ } while (1);
+}
+
+static int emit_semaphore_signal(struct intel_context *ce, void *slot)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
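+	/* Barrier priority, so the signal is scheduled ahead of (and preempts) other contexts */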
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+ return 0;
+}
+
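+/*
+ * Submit a request on the engine's kernel context that waits for the last
+ * request on ce's timeline; once it completes, ce has been switched out and
+ * its context image written back to memory.
+ */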
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
+static int get_lri_mask(struct intel_engine_cs *engine, u32 lri)
+{
+ if ((lri & MI_LRI_LRM_CS_MMIO) == 0)
+ return ~0u;
+
+ if (GRAPHICS_VER(engine->i915) < 12)
+ return 0xfff;
+
+ switch (engine->class) {
+ default:
+ case RENDER_CLASS:
+ case COMPUTE_CLASS:
+ return 0x07ff;
+ case COPY_ENGINE_CLASS:
+ return 0x0fff;
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ return 0x3fff;
+ }
+}
+
+static int live_lrc_layout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 *lrc;
+ int err;
+
+ /*
+	 * Check that the register offsets we use to create the initial reg
+	 * state match the layout saved by HW.
+ */
+
+ lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
+ if (!lrc)
+ return -ENOMEM;
+ GEM_BUG_ON(offset_in_page(lrc));
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ u32 *hw;
+ int dw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = shmem_pin_map(engine->default_state);
+ if (!hw) {
+ err = -ENOMEM;
+ break;
+ }
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
+
+ __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
+ engine->kernel_context, engine, true);
+
+ dw = 0;
+ do {
+ u32 lri = READ_ONCE(hw[dw]);
+ u32 lri_mask;
+
+ if (lri == 0) {
+ dw++;
+ continue;
+ }
+
+ if (lrc[dw] == 0) {
+ pr_debug("%s: skipped instruction %x at dword %d\n",
+ engine->name, lri, dw);
+ dw++;
+ continue;
+ }
+
+ if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
+ pr_err("%s: Expected LRI command at dword %d, found %08x\n",
+ engine->name, dw, lri);
+ err = -EINVAL;
+ break;
+ }
+
+ if (lrc[dw] != lri) {
+ pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
+ engine->name, dw, lri, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+			 * When bit 19 of the MI_LOAD_REGISTER_IMM instruction
+			 * opcode is set on Gen12+ devices, HW does not
+			 * care about certain register address offsets, and
+			 * instead checks the following for valid address
+ * ranges on specific engines:
+ * RCS && CCS: BITS(0 - 10)
+ * BCS: BITS(0 - 11)
+ * VECS && VCS: BITS(0 - 13)
+ */
+ lri_mask = get_lri_mask(engine, lri);
+
+ lri &= 0x7f;
+ lri++;
+ dw++;
+
+ while (lri) {
+ u32 offset = READ_ONCE(hw[dw]);
+
+ if ((offset ^ lrc[dw]) & lri_mask) {
+ pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
+ engine->name, dw, offset, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Skip over the actual register value as we
+ * expect that to differ.
+ */
+ dw += 2;
+ lri -= 2;
+ }
+ } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ if (err) {
+ pr_info("%s: HW register image:\n", engine->name);
+ igt_hexdump(hw, PAGE_SIZE);
+
+ pr_info("%s: SW register image:\n", engine->name);
+ igt_hexdump(lrc, PAGE_SIZE);
+ }
+
+ shmem_unpin_map(engine->default_state, hw);
+ if (err)
+ break;
+ }
+
+ free_page((unsigned long)lrc);
+ return err;
+}
+
+static int find_offset(const u32 *lri, u32 offset)
+{
+ int i;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ if (lri[i] == offset)
+ return i;
+
+ return -1;
+}
+
+static int live_lrc_fixed(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the assumed register offsets match the actual locations in
+ * the context image.
+ */
+
+ for_each_engine(engine, gt, id) {
+ const struct {
+ u32 reg;
+ u32 offset;
+ const char *name;
+ } tbl[] = {
+ {
+ i915_mmio_reg_offset(RING_START(engine->mmio_base)),
+ CTX_RING_START - 1,
+ "RING_START"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
+ CTX_RING_CTL - 1,
+ "RING_CTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
+ CTX_RING_HEAD - 1,
+ "RING_HEAD"
+ },
+ {
+ i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
+ CTX_RING_TAIL - 1,
+ "RING_TAIL"
+ },
+ {
+ i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
+ lrc_ring_mi_mode(engine),
+ "RING_MI_MODE"
+ },
+ {
+ i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
+ CTX_BB_STATE - 1,
+ "BB_STATE"
+ },
+ {
+ i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
+ lrc_ring_wa_bb_per_ctx(engine),
+ "RING_BB_PER_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
+ lrc_ring_indirect_ptr(engine),
+ "RING_INDIRECT_CTX_PTR"
+ },
+ {
+ i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
+ lrc_ring_indirect_offset(engine),
+ "RING_INDIRECT_CTX_OFFSET"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
+ CTX_TIMESTAMP - 1,
+ "RING_CTX_TIMESTAMP"
+ },
+ {
+ i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
+ lrc_ring_gpr0(engine),
+ "RING_CS_GPR0"
+ },
+ {
+ i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
+ lrc_ring_cmd_buf_cctl(engine),
+ "RING_CMD_BUF_CCTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
+ lrc_ring_bb_offset(engine),
+ "RING_BB_OFFSET"
+ },
+ { },
+ }, *t;
+ u32 *hw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = shmem_pin_map(engine->default_state);
+ if (!hw) {
+ err = -ENOMEM;
+ break;
+ }
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
+
+ for (t = tbl; t->name; t++) {
+ int dw = find_offset(hw, t->reg);
+
+ if (dw != t->offset) {
+ pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
+ engine->name,
+ t->name,
+ t->reg,
+ dw,
+ t->offset);
+ err = -EINVAL;
+ }
+ }
+
+ shmem_unpin_map(engine->default_state, hw);
+ }
+
+ return err;
+}
+
+static int __live_lrc_state(struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
+ enum {
+ RING_START_IDX = 0,
+ RING_TAIL_IDX,
+ MAX_IDX
+ };
+ u32 expected[MAX_IDX];
+ u32 *cs;
+ int err;
+ int n;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ cs = intel_ring_begin(rq, 4 * MAX_IDX);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_unpin;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
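+	/* The ring tail is only updated as the request is submitted to HW */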
+ intel_engine_flush_submission(engine);
+ expected[RING_TAIL_IDX] = ce->ring->tail;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < MAX_IDX; n++) {
+ if (cs[n] != expected[n]) {
+ pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
+ engine->name, n, cs[n], expected[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_state(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the live register state matches what we expect for this
+ * intel_context.
+ */
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ for_each_engine(engine, gt, id) {
+ err = __live_lrc_state(engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int gpr_make_dirty(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int n;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
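+	/* Fill every GPR with a known nonzero value using a single LRI */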
+ *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = CS_GPR(ce->engine, n);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static struct i915_request *
+__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return ERR_CAST(cs);
+ }
+
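+	/*
+	 * Re-enable arbitration, then block on the semaphore in the status
+	 * page before dumping the GPRs into the scratch buffer.
+	 */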
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(ce->engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ rq = ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int __live_lrc_gpr(struct intel_engine_cs *engine,
+ struct i915_vma *scratch,
+ bool preempt)
+{
+ u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ return 0; /* GPR only on rcs0 for gen8 */
+
+ err = gpr_make_dirty(engine->kernel_context);
+ if (err)
+ return err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ rq = __gpr_read(ce, scratch, slot);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_put;
+ }
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
+
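+	/*
+	 * Either release the semaphore directly, or dirty the kernel context
+	 * again and release it via a preempting request so that the GPRs are
+	 * sampled after our context has been saved and restored.
+	 */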
+ if (preempt) {
+ err = gpr_make_dirty(engine->kernel_context);
+ if (err)
+ goto err_rq;
+
+ err = emit_semaphore_signal(engine->kernel_context, slot);
+ if (err)
+ goto err_rq;
+
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n]) {
+ pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
+ engine->name,
+ n / 2, n & 1 ? "udw" : "ldw",
+ cs[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ memset32(&slot[0], -1, 4);
+ wmb();
+ i915_request_put(rq);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_gpr(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+	 * Check that the GPRs are cleared in new contexts, as we need to
+	 * avoid leaking any information from previous contexts.
+ */
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ for_each_engine(engine, gt, id) {
+ st_engine_heartbeat_disable(engine);
+
+ err = __live_lrc_gpr(engine, scratch, false);
+ if (err)
+ goto err;
+
+ err = __live_lrc_gpr(engine, scratch, true);
+ if (err)
+ goto err;
+
+err:
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static struct i915_request *
+create_timestamp(struct intel_context *ce, void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+struct lrc_timestamp {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce[2];
+ u32 poison;
+};
+
+static bool timestamp_advanced(u32 start, u32 end)
+{
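+	/* Signed delta, so wraparound of the 32b timestamp is handled */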
+ return (s32)(end - start) > 0;
+}
+
+static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
+{
+ u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
+ struct i915_request *rq;
+ u32 timestamp;
+ int err = 0;
+
+ arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
+ rq = create_timestamp(arg->ce[0], slot, 1);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = wait_for_submit(rq->engine, rq, HZ / 2);
+ if (err)
+ goto err;
+
+ if (preempt) {
+ arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
+ err = emit_semaphore_signal(arg->ce[1], slot);
+ if (err)
+ goto err;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
+
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
+ goto err;
+
+ if (!timestamp_advanced(arg->poison, slot[1])) {
+ pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ arg->poison, slot[1]);
+ err = -EINVAL;
+ }
+
+ timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
+ if (!timestamp_advanced(slot[1], timestamp)) {
+ pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ slot[1], timestamp);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(slot, -1, 4);
+ i915_request_put(rq);
+ return err;
+}
+
+static int live_lrc_timestamp(void *arg)
+{
+ struct lrc_timestamp data = {};
+ struct intel_gt *gt = arg;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ 0,
+ S32_MAX,
+ (u32)S32_MAX + 1,
+ U32_MAX,
+ };
+
+ /*
+	 * We want to verify that the timestamp is saved and restored across
+ * context switches and is monotonic.
+ *
+ * So we do this with a little bit of LRC poisoning to check various
+ * boundary conditions, and see what happens if we preempt the context
+ * with a second request (carrying more poison into the timestamp).
+ */
+
+ for_each_engine(data.engine, gt, id) {
+ int i, err = 0;
+
+ st_engine_heartbeat_disable(data.engine);
+
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(data.engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err;
+ }
+
+ data.ce[i] = tmp;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ data.poison = poison[i];
+
+ err = __lrc_timestamp(&data, false);
+ if (err)
+ break;
+
+ err = __lrc_timestamp(&data, true);
+ if (err)
+ break;
+ }
+
+err:
+ st_engine_heartbeat_enable(data.engine);
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ if (!data.ce[i])
+ break;
+
+ intel_context_unpin(data.ce[i]);
+ intel_context_put(data.ce[i]);
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static u32 safe_poison(u32 offset, u32 poison)
+{
+ /*
+ * Do not enable predication as it will nop all subsequent commands,
+ * not only disabling the tests (by preventing all the other SRM) but
+ * also preventing the arbitration events at the end of the request.
+ */
+ if (offset == i915_mmio_reg_offset(RING_PREDICATE_RESULT(0)))
+ poison &= ~REG_BIT(0);
+
+ return poison;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+ u32 *defaults;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /*
+ * Keep it simple, skip parsing complex commands
+ *
+ * At present, there are no more MI_LOAD_REGISTER_IMM
+ * commands after the first 3D state command. Rather
+ * than include a table (see i915_cmd_parser.c) of all
+ * the possible commands and their instruction lengths
+ * (or mask for variable length instructions), assume
+ * we have gathered the complete list of registers and
+ * bail out.
+ */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ /* Assume all other MI commands match LRI length mask */
+ dw += len + 2;
+ continue;
+ }
+
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
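+	/*
+	 * Dump the registers into @before, wait on the semaphore until it is
+	 * released, then dump them again into @after for comparison.
+	 */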
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+ u32 *defaults;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ i915_gem_object_unpin_map(batch->obj);
+ i915_vma_put(batch);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dw = 0;
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ dw += len + 2;
+ continue;
+ }
+
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
+ MI_LRI_LRM_CS_MMIO),
+ poison);
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ shmem_unpin_map(ce->engine->default_state, defaults);
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
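+	/*
+	 * Replay the context registers with poison, then release the
+	 * semaphore that the recording context is waiting on.
+	 */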
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ u32 *defaults;
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj,
+ false));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_OFFSET / sizeof(*hw);
+
+ defaults = shmem_pin_map(ce->engine->default_state);
+ if (!defaults) {
+ err = -ENOMEM;
+ goto err_lrc;
+ }
+
+ x = 0;
+ dw = 0;
+ hw = defaults;
+ hw += LRC_STATE_OFFSET / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ dw += len + 2;
+ continue;
+ }
+
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
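+			/*
+			 * Only complain about registers that were stable in
+			 * the reference run yet differ once another context
+			 * has written its poison.
+			 */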
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static struct i915_vma *
+create_result_vma(struct i915_address_space *vm, unsigned long sz)
+{
+ struct i915_vma *vma;
+ void *ptr;
+
+ vma = create_user_vma(vm, sz);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Set the results to a known value distinct from the poison */
+ ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ i915_vma_put(vma);
+ return ERR_CAST(ptr);
+ }
+
+ memset(ptr, POISON_INUSE, vma->size);
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return vma;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_result_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_result_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
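+	/* Release the reference run immediately; no other context interferes */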
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_result_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_result_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("%s(%s): wait for results timed out\n",
+ __func__, engine->name);
+ err = -ETIME;
+ }
+
+ /* Always cancel the semaphore wait, just in case the GPU gets stuck */
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ if (err)
+ goto err_result1;
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+ int err = 0;
+
+ /*
+	 * Our goal is to try to verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ int result;
+
+ result = __lrc_isolation(engine, poison[i]);
+ if (result && !err)
+ err = result;
+
+ result = __lrc_isolation(engine, ~poison[i]);
+ if (result && !err)
+ err = result;
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int indirect_ctx_submit_req(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+#define CTX_BB_CANARY_OFFSET (3 * 1024)
+#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
+
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT |
+ MI_LRI_LRM_CS_MMIO;
+ *cs++ = i915_mmio_reg_offset(RING_START(0));
+ *cs++ = i915_ggtt_offset(ce->state) +
+ context_wa_bb_offset(ce) +
+ CTX_BB_CANARY_OFFSET;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static void
+indirect_ctx_bb_setup(struct intel_context *ce)
+{
+ u32 *cs = context_indirect_bb(ce);
+
+ cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
+
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+}
+
+static bool check_ring_start(struct intel_context *ce)
+{
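+	/*
+	 * The wa_bb shares the context image with the register state; step
+	 * back to the start of the image, then forward to the wa_bb offset.
+	 */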
+ const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+
+ if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
+ return true;
+
+ pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
+ ctx_bb[CTX_BB_CANARY_INDEX],
+ ce->lrc_reg_state[CTX_RING_START]);
+
+ return false;
+}
+
+static int indirect_ctx_bb_check(struct intel_context *ce)
+{
+ int err;
+
+ err = indirect_ctx_submit_req(ce);
+ if (err)
+ return err;
+
+ if (!check_ring_start(ce))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+{
+ struct intel_context *a, *b;
+ int err;
+
+ a = intel_context_create(engine);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+ err = intel_context_pin(a);
+ if (err)
+ goto put_a;
+
+ b = intel_context_create(engine);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto unpin_a;
+ }
+ err = intel_context_pin(b);
+ if (err)
+ goto put_b;
+
+ /* We use the already reserved extra page in context state */
+ if (!a->wa_bb_page) {
+ GEM_BUG_ON(b->wa_bb_page);
+ GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
+ goto unpin_b;
+ }
+
+ /*
+	 * In order to test that our per-context bb is truly per context, and
+	 * executes at the intended point in the context restore process, make
+	 * the batch store the ring start value to memory. As ring start is
+	 * restored prior to starting the indirect ctx bb, and as it differs
+	 * for each context, it is well suited to this purpose.
+ */
+ indirect_ctx_bb_setup(a);
+ indirect_ctx_bb_setup(b);
+
+ err = indirect_ctx_bb_check(a);
+ if (err)
+ goto unpin_b;
+
+ err = indirect_ctx_bb_check(b);
+
+unpin_b:
+ intel_context_unpin(b);
+put_b:
+ intel_context_put(b);
+unpin_a:
+ intel_context_unpin(a);
+put_a:
+ intel_context_put(a);
+
+ return err;
+}
+
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_lrc_indirect_ctx_bb(engine);
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
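+	/*
+	 * Claim the per-engine reset bit ourselves and reset the engine
+	 * directly, rather than going through the full error handler.
+	 */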
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->sched_engine->tasklet);
+
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
+
+ tasklet_enable(&engine->sched_engine->tasklet);
+ clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
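+	/* Scribble random garbage over the context's entire register state */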
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_OFFSET);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Verify that we can recover if one context state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ce->stats.runtime.num_underflow = 0;
+ ce->stats.runtime.max_underflow = 0;
+
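+	/*
+	 * Flood the context with batches of requests until the timeout,
+	 * keeping a reference only to the last request of each batch.
+	 */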
+ do {
+ unsigned int loop = 1024;
+
+ while (loop) {
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_rq;
+ }
+
+ if (--loop == 0)
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+
+ i915_request_put(rq);
+ } while (1);
+
+ err = i915_request_wait(rq, 0, HZ / 5);
+ if (err < 0) {
+ pr_err("%s: request not completed!\n", engine->name);
+ goto err_wait;
+ }
+
+ igt_flush_test(engine->i915);
+
+ pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
+ engine->name,
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
+
+ err = 0;
+ if (ce->stats.runtime.num_underflow) {
+ pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
+ engine->name,
+ ce->stats.runtime.num_underflow,
+ ce->stats.runtime.max_underflow);
+ GEM_TRACE_DUMP();
+ err = -EOVERFLOW;
+ }
+
+err_wait:
+ i915_request_put(rq);
+err_rq:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_pphwsp_runtime(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+	 * Check that the cumulative context runtime, as stored in pphwsp[16],
+	 * is monotonic.
+ */
+
+ for_each_engine(engine, gt, id) {
+ err = __live_pphwsp_runtime(engine);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+int intel_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_lrc_layout),
+ SUBTEST(live_lrc_fixed),
+ SUBTEST(live_lrc_state),
+ SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
+ SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
+ SUBTEST(live_pphwsp_runtime),
+ SUBTEST(live_lrc_indirect_ctx_bb),
+ };
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
new file mode 100644
index 000000000..2b0c87999
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "selftests/i915_random.h"
+
+static const unsigned int sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ CHUNK_SZ - SZ_4K,
+ CHUNK_SZ,
+ CHUNK_SZ + SZ_4K,
+ SZ_64M,
+};
+
+static struct drm_i915_gem_object *
+create_lmem_or_internal(struct drm_i915_private *i915, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_lmem(i915, size, 0);
+ if (!IS_ERR(obj))
+ return obj;
+
+ return i915_gem_object_create_internal(i915, size);
+}
+
+static int copy(struct intel_migrate *migrate,
+ int (*fn)(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct i915_request **out),
+ u32 sz, struct rnd_state *prng)
+{
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ struct drm_i915_gem_object *src, *dst;
+ struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
+ u32 *vaddr;
+ int err = 0;
+ int i;
+
+ src = create_lmem_or_internal(i915, sz);
+ if (IS_ERR(src))
+ return 0;
+
+ sz = src->base.size;
+ dst = i915_gem_object_create_internal(i915, sz);
+ if (IS_ERR(dst))
+ goto err_free_src;
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(src, &ww);
+ if (err)
+ continue;
+
+ err = i915_gem_object_lock(dst, &ww);
+ if (err)
+ continue;
+
+ vaddr = i915_gem_object_pin_map(src, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ continue;
+ }
+
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = i;
+ i915_gem_object_flush_map(src);
+
+ vaddr = i915_gem_object_pin_map(dst, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto unpin_src;
+ }
+
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = ~i;
+ i915_gem_object_flush_map(dst);
+
+ err = fn(migrate, &ww, src, dst, &rq);
+ if (!err)
+ continue;
+
+ if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+ pr_err("%ps failed, size: %u\n", fn, sz);
+ if (rq) {
+ i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
+ }
+ i915_gem_object_unpin_map(dst);
+unpin_src:
+ i915_gem_object_unpin_map(src);
+ }
+ if (err)
+ goto err_out;
+
+ if (rq) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n", fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ }
+
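+	/* Check one randomly chosen dword in each page of the destination */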
+ for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+ int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
+
+ if (vaddr[x] != x) {
+ pr_err("%ps failed, size: %u, offset: %zu\n",
+ fn, sz, x * sizeof(u32));
+ igt_hexdump(vaddr + i * 1024, 4096);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(dst);
+ i915_gem_object_unpin_map(src);
+
+err_out:
+ i915_gem_object_put(dst);
+err_free_src:
+ i915_gem_object_put(src);
+
+ return err;
+}
+
+static int intel_context_copy_ccs(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ u8 src_access = write_to_ccs ? DIRECT_ACCESS : INDIRECT_ACCESS;
+ u8 dst_access = write_to_ccs ? INDIRECT_ACCESS : DIRECT_ACCESS;
+ struct sgt_dma it = sg_sgt(sg);
+ struct i915_request *rq;
+ u32 offset;
+ int err;
+
+ GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ *out = NULL;
+
+ GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+ offset = 0;
+ if (HAS_64K_PAGES(ce->engine->i915))
+ offset = CHUNK_SZ;
+
+ do {
+ int len;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
+ if (err)
+ goto out_rq;
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ deps = NULL;
+ }
+
+ /* The PTE updates + clear must not be interrupted. */
+ err = emit_no_arbitration(rq);
+ if (err)
+ goto out_rq;
+
+ len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ);
+ if (len <= 0) {
+ err = len;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy_ccs(rq, offset, dst_access,
+ offset, src_access, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+
+ /* Arbitration is re-enabled between requests. */
+out_rq:
+ if (*out)
+ i915_request_put(*out);
+ *out = i915_request_get(rq);
+ i915_request_add(rq);
+ if (err || !it.sg || !sg_dma_len(it.sg))
+ break;
+
+ cond_resched();
+ } while (1);
+
+out_ce:
+ return err;
+}
+
+static int
+intel_migrate_ccs_copy(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ struct intel_context *ce;
+ int err;
+
+ *out = NULL;
+ if (!m->context)
+ return -ENODEV;
+
+ ce = intel_migrate_create_context(m);
+ if (IS_ERR(ce))
+ ce = intel_context_get(m->context);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ err = intel_context_pin_ww(ce, ww);
+ if (err)
+ goto out;
+
+ err = intel_context_copy_ccs(ce, deps, sg, cache_level,
+ write_to_ccs, out);
+
+ intel_context_unpin(ce);
+out:
+ intel_context_put(ce);
+ return err;
+}
+
+static int clear(struct intel_migrate *migrate,
+ int (*fn)(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *obj,
+ u32 value,
+ struct i915_request **out),
+ u32 sz, struct rnd_state *prng)
+{
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
+ u32 *vaddr, val = 0;
+ bool ccs_cap = false;
+ int err = 0;
+ int i;
+
+ obj = create_lmem_or_internal(i915, sz);
+ if (IS_ERR(obj))
+ return 0;
+
+ /* Consider the rounded up memory too */
+ sz = obj->base.size;
+
+ if (HAS_FLAT_CCS(i915) && i915_gem_object_is_lmem(obj))
+ ccs_cap = true;
+
+ for_i915_gem_ww(&ww, err, true) {
+ int ccs_bytes, ccs_bytes_per_chunk;
+
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ continue;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ continue;
+ }
+
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = ~i;
+ i915_gem_object_flush_map(obj);
+
+ if (ccs_cap && !val) {
+ /* Write the obj data into ccs surface */
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ true, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+ }
+
+ err = fn(migrate, &ww, obj, val, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n", fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+
+ i915_gem_object_flush_map(obj);
+
+ /* Verify the set/clear of the obj mem */
+ for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+ int x = i * 1024 +
+ i915_prandom_u32_max_state(1024, prng);
+
+ if (vaddr[x] != val) {
+ pr_err("%ps failed, (%u != %u), offset: %zu\n",
+ fn, vaddr[x], val, x * sizeof(u32));
+ igt_hexdump(vaddr + i * 1024, 4096);
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ continue;
+
+ if (ccs_cap && !val) {
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = ~i;
+ i915_gem_object_flush_map(obj);
+
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ false, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+
+ ccs_bytes = GET_CCS_BYTES(i915, sz);
+ ccs_bytes_per_chunk = GET_CCS_BYTES(i915, CHUNK_SZ);
+ i915_gem_object_flush_map(obj);
+
+ for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) {
+ int offset = ((i * PAGE_SIZE) /
+ ccs_bytes_per_chunk) * CHUNK_SZ / sizeof(u32);
+ int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32);
+ int x = i915_prandom_u32_max_state(min_t(int, 1024,
+ ccs_bytes_left), prng);
+
+ if (vaddr[offset + x]) {
+ pr_err("%ps ccs clearing failed, offset: %ld/%d\n",
+ fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes);
+ igt_hexdump(vaddr + offset,
+ min_t(int, 4096,
+ ccs_bytes_left * sizeof(u32)));
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ continue;
+ }
+ i915_gem_object_unpin_map(obj);
+ }
+
+ if (err) {
+ if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+ pr_err("%ps failed, size: %u\n", fn, sz);
+ if (rq && err != -EINVAL) {
+ i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
+ }
+
+ i915_gem_object_unpin_map(obj);
+ }
+
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int __migrate_copy(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct i915_request **out)
+{
+ return intel_migrate_copy(migrate, ww, NULL,
+ src->mm.pages->sgl, src->cache_level,
+ i915_gem_object_is_lmem(src),
+ dst->mm.pages->sgl, dst->cache_level,
+ i915_gem_object_is_lmem(dst),
+ out);
+}
+
+static int __global_copy(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct i915_request **out)
+{
+ return intel_context_migrate_copy(migrate->context, NULL,
+ src->mm.pages->sgl, src->cache_level,
+ i915_gem_object_is_lmem(src),
+ dst->mm.pages->sgl, dst->cache_level,
+ i915_gem_object_is_lmem(dst),
+ out);
+}
+
+static int
+migrate_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+ return copy(migrate, __migrate_copy, sz, prng);
+}
+
+static int
+global_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+ return copy(migrate, __global_copy, sz, prng);
+}
+
+static int __migrate_clear(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *obj,
+ u32 value,
+ struct i915_request **out)
+{
+ return intel_migrate_clear(migrate, ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ i915_gem_object_is_lmem(obj),
+ value, out);
+}
+
+static int __global_clear(struct intel_migrate *migrate,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *obj,
+ u32 value,
+ struct i915_request **out)
+{
+ return intel_context_migrate_clear(migrate->context, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ i915_gem_object_is_lmem(obj),
+ value, out);
+}
+
+static int
+migrate_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+ return clear(migrate, __migrate_clear, sz, prng);
+}
+
+static int
+global_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+ return clear(migrate, __global_clear, sz, prng);
+}
+
+static int live_migrate_copy(void *arg)
+{
+ struct intel_migrate *migrate = arg;
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ I915_RND_STATE(prng);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ int err;
+
+ err = migrate_copy(migrate, sizes[i], &prng);
+ if (err == 0)
+ err = global_copy(migrate, sizes[i], &prng);
+ i915_gem_drain_freed_objects(i915);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int live_migrate_clear(void *arg)
+{
+ struct intel_migrate *migrate = arg;
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ I915_RND_STATE(prng);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ int err;
+
+ err = migrate_clear(migrate, sizes[i], &prng);
+ if (err == 0)
+ err = global_clear(migrate, sizes[i], &prng);
+
+ i915_gem_drain_freed_objects(i915);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct threaded_migrate {
+ struct intel_migrate *migrate;
+ struct task_struct *tsk;
+ struct rnd_state prng;
+};
+
+static int threaded_migrate(struct intel_migrate *migrate,
+ int (*fn)(void *arg),
+ unsigned int flags)
+{
+ const unsigned int n_cpus = num_online_cpus() + 1;
+ struct threaded_migrate *thread;
+ I915_RND_STATE(prng);
+ unsigned int i;
+ int err = 0;
+
+ thread = kcalloc(n_cpus, sizeof(*thread), GFP_KERNEL);
+ if (!thread)
+ return 0;
+
+ for (i = 0; i < n_cpus; ++i) {
+ struct task_struct *tsk;
+
+ thread[i].migrate = migrate;
+ thread[i].prng =
+ I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+ tsk = kthread_run(fn, &thread[i], "igt-%d", i);
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ break;
+ }
+
+ get_task_struct(tsk);
+ thread[i].tsk = tsk;
+ }
+
+ msleep(10); /* start all threads before we kthread_stop() */
+
+ for (i = 0; i < n_cpus; ++i) {
+ struct task_struct *tsk = thread[i].tsk;
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk))
+ continue;
+
+ status = kthread_stop(tsk);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk);
+ }
+
+ kfree(thread);
+ return err;
+}
+
+static int __thread_migrate_copy(void *arg)
+{
+ struct threaded_migrate *tm = arg;
+
+ return migrate_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_migrate_copy(void *arg)
+{
+ return threaded_migrate(arg, __thread_migrate_copy, 0);
+}
+
+static int __thread_global_copy(void *arg)
+{
+ struct threaded_migrate *tm = arg;
+
+ return global_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_global_copy(void *arg)
+{
+ return threaded_migrate(arg, __thread_global_copy, 0);
+}
+
+static int __thread_migrate_clear(void *arg)
+{
+ struct threaded_migrate *tm = arg;
+
+ return migrate_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int __thread_global_clear(void *arg)
+{
+ struct threaded_migrate *tm = arg;
+
+ return global_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_migrate_clear(void *arg)
+{
+ return threaded_migrate(arg, __thread_migrate_clear, 0);
+}
+
+static int thread_global_clear(void *arg)
+{
+ return threaded_migrate(arg, __thread_global_clear, 0);
+}
+
+int intel_migrate_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_migrate_copy),
+ SUBTEST(live_migrate_clear),
+ SUBTEST(thread_migrate_copy),
+ SUBTEST(thread_migrate_clear),
+ SUBTEST(thread_global_copy),
+ SUBTEST(thread_global_clear),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!gt->migrate.context)
+ return 0;
+
+ return i915_subtests(tests, &gt->migrate);
+}
+
+static struct drm_i915_gem_object *
+create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
+{
+ struct drm_i915_gem_object *obj = NULL;
+ int err;
+
+ if (try_lmem)
+ obj = i915_gem_object_create_lmem(gt->i915, sz, 0);
+
+ if (IS_ERR_OR_NULL(obj)) {
+ obj = i915_gem_object_create_internal(gt->i915, sz);
+ if (IS_ERR(obj))
+ return obj;
+ }
+
+ i915_gem_object_trylock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static int __perf_clear_blt(struct intel_context *ce,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool is_lmem,
+ size_t sz)
+{
+ ktime_t t[5];
+ int pass;
+ int err = 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct i915_request *rq;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = intel_context_migrate_clear(ce, NULL, sg, cache_level,
+ is_lmem, 0, &rq);
+ if (rq) {
+ if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ }
+ if (err)
+ break;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+ if (err)
+ return err;
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
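+	/* Weighted median of 5 passes: sz bytes over (t[1] + 2*t[2] + t[3])/4 ns, reported in MiB/s */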
+ pr_info("%s: %zd KiB fill: %lld MiB/s\n",
+ ce->engine->name, sz >> 10,
+ div64_u64(mul_u32_u32(4 * sz,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ return 0;
+}
+
+static int perf_clear_blt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *dst;
+ int err;
+
+ dst = create_init_lmem_internal(gt, sizes[i], true);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ err = __perf_clear_blt(gt->migrate.context,
+ dst->mm.pages->sgl,
+ I915_CACHE_NONE,
+ i915_gem_object_is_lmem(dst),
+ sizes[i]);
+
+ i915_gem_object_unlock(dst);
+ i915_gem_object_put(dst);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __perf_copy_blt(struct intel_context *ce,
+ struct scatterlist *src,
+ enum i915_cache_level src_cache_level,
+ bool src_is_lmem,
+ struct scatterlist *dst,
+ enum i915_cache_level dst_cache_level,
+ bool dst_is_lmem,
+ size_t sz)
+{
+ ktime_t t[5];
+ int pass;
+ int err = 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct i915_request *rq;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = intel_context_migrate_copy(ce, NULL,
+ src, src_cache_level,
+ src_is_lmem,
+ dst, dst_cache_level,
+ dst_is_lmem,
+ &rq);
+ if (rq) {
+ if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ }
+ if (err)
+ break;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+ if (err)
+ return err;
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: %zd KiB copy: %lld MiB/s\n",
+ ce->engine->name, sz >> 10,
+ div64_u64(mul_u32_u32(4 * sz,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ return 0;
+}
+
+static int perf_copy_blt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *src, *dst;
+ size_t sz;
+ int err;
+
+ src = create_init_lmem_internal(gt, sizes[i], true);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ sz = src->base.size;
+ dst = create_init_lmem_internal(gt, sz, false);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_src;
+ }
+
+ err = __perf_copy_blt(gt->migrate.context,
+ src->mm.pages->sgl,
+ I915_CACHE_NONE,
+ i915_gem_object_is_lmem(src),
+ dst->mm.pages->sgl,
+ I915_CACHE_NONE,
+ i915_gem_object_is_lmem(dst),
+ sz);
+
+ i915_gem_object_unlock(dst);
+ i915_gem_object_put(dst);
+err_src:
+ i915_gem_object_unlock(src);
+ i915_gem_object_put(src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_migrate_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_clear_blt),
+ SUBTEST(perf_copy_blt),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!gt->migrate.context)
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000..c1d861333
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table *mocs;
+ struct drm_i915_mocs_table *l3cc;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ /* We build large requests to read the registers from the ring */
+ ce->ring_size = SZ_16K;
+
+ return ce;
+}
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ unsigned int flags;
+ int err;
+
+ memset(arg, 0, sizeof(*arg));
+
+ flags = get_mocs_settings(gt->i915, &arg->table);
+ if (!flags)
+ return -EINVAL;
+
+ if (flags & HAS_RENDER_L3CC)
+ arg->l3cc = &arg->table;
+
+ if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
+ arg->mocs = &arg->table;
+
+ arg->scratch =
+ __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+ u32 addr, unsigned int count,
+ u32 *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
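+	/* Each register is copied with a 4-dword SRM into the scratch buffer at the current GGTT offset */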
+ for (i = 0; i < count; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = *offset;
+ *cs++ = 0;
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr;
+
+ if (!table)
+ return 0;
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
+ addr = global_mocs_offset();
+ else
+ addr = mocs_offset(rq->engine);
+
+ return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+ if (!table)
+ return 0;
+
+ return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ if (!table)
+ return 0;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+	/* Can we read the MCR range 0xb000 directly? See intel_workarounds! */
+ u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ unsigned int i;
+ u32 expect;
+
+ if (!table)
+ return 0;
+
+ for_each_l3cc(expect, table, i) {
+ if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ /* Read the mocs tables back using SRM */
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, arg->mocs, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, arg->l3cc, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ /* Compare the results against the expected tables */
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
+ if (err)
+ return err;
+
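+	/* Every byte written by the SRM reads must have been consumed by the table checks above */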
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+	/* Basic check that the system is configured with the expected mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Every new context should see the same mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = mocs_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason,
+ bool using_guc)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0 && !using_guc)
+ err = intel_engine_reset(ce->engine, reason);
+
+ /* Ensure the reset happens and kills the engine */
+ if (err == 0)
+ err = intel_selftest_wait_for_rq(rq);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce, bool using_guc)
+{
+ struct intel_gt *gt = ce->engine->gt;
+ int err;
+
+ if (intel_has_reset_engine(gt)) {
+ if (!using_guc) {
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
+
+ err = active_engine_reset(ce, "mocs", using_guc);
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
+
+ if (intel_has_gpu_reset(gt)) {
+ intel_gt_reset(gt, ce->engine->mask, "mocs");
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ /* Check the mocs setup is retained over per-engine and global resets */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ bool using_guc = intel_engine_uses_guc(engine);
+ struct intel_selftest_saved_policy saved;
+ struct intel_context *ce;
+ int err2;
+
+ err = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+ if (err)
+ break;
+
+ ce = mocs_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto restore;
+ }
+
+ intel_engine_pm_get(engine);
+
+ err = __live_mocs_reset(&mocs, ce, using_guc);
+
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+
+restore:
+ err2 = intel_selftest_restore_policy(engine, &saved);
+ if (err == 0)
+ err = err2;
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
new file mode 100644
index 000000000..8c70b7e12
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_rc6.h"
+
+#include "selftests/i915_random.h"
+#include "selftests/librapl.h"
+
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+ u64 result;
+
+ /* XXX VLV_GT_MEDIA_RC6? */
+
+ result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+ return result;
+}
+
+int live_rc6_manual(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ u64 rc0_power, rc6_power;
+ intel_wakeref_t wakeref;
+ bool has_power;
+ ktime_t dt;
+ u64 res[2];
+ int err = 0;
+
+ /*
+ * Our claim is that we can "encourage" the GPU to enter rc6 at will.
+ * Let's try it!
+ */
+
+ if (!rc6->enabled)
+ return 0;
+
+ /* bsw/byt use a PCU and decouple RC6 from our manual control */
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ return 0;
+
+ has_power = librapl_supported(gt->i915);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ /* Force RC6 off for starters */
+ __intel_rc6_disable(rc6);
+ msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+
+ res[0] = rc6_residency(rc6);
+
+ dt = ktime_get();
+ rc0_power = librapl_energy_uJ();
+ msleep(250);
+ rc0_power = librapl_energy_uJ() - rc0_power;
+ dt = ktime_sub(ktime_get(), dt);
+ res[1] = rc6_residency(rc6);
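+	/* Residency is reported in ns; the >>10 below approximates a conversion to us */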
+ if ((res[1] - res[0]) >> 10) {
+ pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ (res[1] - res[0]) >> 10);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (has_power) {
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ /* Manually enter RC6 */
+ intel_rc6_park(rc6);
+
+ res[0] = rc6_residency(rc6);
+ intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+ dt = ktime_get();
+ rc6_power = librapl_energy_uJ();
+ msleep(100);
+ rc6_power = librapl_energy_uJ() - rc6_power;
+ dt = ktime_sub(ktime_get(), dt);
+ res[1] = rc6_residency(rc6);
+ if (res[1] == res[0]) {
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
+ res[0]);
+ err = -EINVAL;
+ }
+
+ if (has_power) {
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ /* Restore what should have been the original state! */
+ intel_rc6_unpark(rc6);
+
+out_unlock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
+
+static const u32 *__live_rc6_ctx(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ const u32 *result;
+ u32 cmd;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return cs;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ cmd++;
+
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
+ *cs++ = ce->timeline->hwsp_offset + 8;
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
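+	/* The SRM above targets hwsp_offset + 8, i.e. two dwords past the per-request seqno */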
+ result = rq->hwsp_seqno + 2;
+ i915_request_add(rq);
+
+ return result;
+}
+
+static struct intel_engine_cs **
+randomised_engines(struct intel_gt *gt,
+ struct rnd_state *prng,
+ unsigned int *count)
+{
+ struct intel_engine_cs *engine, **engines;
+ enum intel_engine_id id;
+ int n;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ n++;
+ if (!n)
+ return NULL;
+
+ engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return NULL;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ engines[n++] = engine;
+
+ i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
+
+ *count = n;
+ return engines;
+}
+
+int live_rc6_ctx_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs **engines;
+ unsigned int n, count;
+ I915_RND_STATE(prng);
+ int err = 0;
+
+ /* A read of CTX_INFO upsets rc6. Poke the bear! */
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0;
+
+ engines = randomised_engines(gt, &prng, &count);
+ if (!engines)
+ return 0;
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *engine = engines[n];
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ struct i915_gpu_error *error = &gt->i915->gpu_error;
+ struct intel_context *ce;
+ unsigned int resets =
+ i915_reset_engine_count(error, engine);
+ const u32 *res;
+
+			/* Use a sacrificial context */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ intel_engine_pm_get(engine);
+ res = __live_rc6_ctx(ce);
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto out;
+ }
+
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ pr_debug("%s: CTX_INFO=%0x\n",
+ engine->name, READ_ONCE(*res));
+
+ if (resets !=
+ i915_reset_engine_count(error, engine)) {
+ pr_err("%s: GPU reset required\n",
+ engine->name);
+ add_taint_for_CI(gt->i915, TAINT_WARN);
+ err = -EIO;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(engines);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
new file mode 100644
index 000000000..daf092790
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_RC6_H
+#define SELFTEST_RC6_H
+
+int live_rc6_ctx_wa(void *arg);
+int live_rc6_manual(void *arg);
+
+#endif /* SELFTEST_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
new file mode 100644
index 000000000..37c38bdd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/crc32.h>
+
+#include "gem/i915_gem_stolen.h"
+
+#include "i915_memcpy.h"
+#include "i915_selftest.h"
+#include "intel_gpu_commands.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_atomic.h"
+#include "selftests/igt_spinner.h"
+
+static int
+__igt_reset_stolen(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ const char *msg)
+{
+ struct i915_ggtt *ggtt = gt->ggtt;
+ const struct resource *dsm = &gt->i915->dsm;
+ resource_size_t num_pages, page;
+ struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ long max, count;
+ void *tmp;
+ u32 *crc;
+ int err;
+
+ if (!drm_mm_node_allocated(&ggtt->error_capture))
+ return 0;
+
+ num_pages = resource_size(dsm) >> PAGE_SHIFT;
+ if (!num_pages)
+ return 0;
+
+ crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
+ if (!crc)
+ return -ENOMEM;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_crc;
+ }
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ err = igt_spinner_init(&spin, gt);
+ if (err)
+ goto err_lock;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ if (!(mask & engine->mask))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err_spin;
+ }
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_spin;
+ }
+ i915_request_add(rq);
+ }
+
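+	/* CRC every stolen page through the error-capture GGTT slot before triggering the reset */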
+ for (page = 0; page < num_pages; page++) {
+ dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
+ void __iomem *s;
+ void *in;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma,
+ ggtt->error_capture.start,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
+ page << PAGE_SHIFT,
+ ((page + 1) << PAGE_SHIFT) - 1))
+ memset_io(s, STACK_MAGIC, PAGE_SIZE);
+
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
+ in = tmp;
+ crc[page] = crc32_le(0, in, PAGE_SIZE);
+
+ io_mapping_unmap(s);
+ }
+ mb();
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+
+ if (mask == ALL_ENGINES) {
+ intel_gt_reset(gt, mask, NULL);
+ } else {
+ for_each_engine(engine, gt, id) {
+ if (mask & engine->mask)
+ intel_engine_reset(engine, NULL);
+ }
+ }
+
+ max = -1;
+ count = 0;
+ for (page = 0; page < num_pages; page++) {
+ dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
+ void __iomem *s;
+ void *in;
+ u32 x;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma,
+ ggtt->error_capture.start,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ in = (void __force *)s;
+ if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
+ in = tmp;
+ x = crc32_le(0, in, PAGE_SIZE);
+
+ if (x != crc[page] &&
+ !__drm_mm_interval_first(&gt->i915->mm.stolen,
+ page << PAGE_SHIFT,
+ ((page + 1) << PAGE_SHIFT) - 1)) {
+ pr_debug("unused stolen page %pa modified by GPU reset\n",
+ &page);
+ if (count++ == 0)
+ igt_hexdump(in, PAGE_SIZE);
+ max = page;
+ }
+
+ io_mapping_unmap(s);
+ }
+ mb();
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+
+ if (count > 0) {
+ pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
+ msg, count, max);
+ }
+ if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
+ pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
+ msg, I915_GEM_STOLEN_BIAS);
+ err = -EINVAL;
+ }
+
+err_spin:
+ igt_spinner_fini(&spin);
+
+err_lock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ kfree(tmp);
+err_crc:
+ kfree(crc);
+ return err;
+}
+
+static int igt_reset_device_stolen(void *arg)
+{
+ return __igt_reset_stolen(arg, ALL_ENGINES, "device");
+}
+
+static int igt_reset_engines_stolen(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __igt_reset_stolen(gt, engine->mask, engine->name);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_global_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ unsigned int reset_count;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /* Check that we can issue a global GPU reset */
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ reset_count = i915_reset_count(&gt->i915->gpu_error);
+
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+
+ if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ }
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ if (intel_gt_is_wedged(gt))
+ err = -EIO;
+
+ return err;
+}
+
+static int igt_wedged_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ intel_wakeref_t wakeref;
+
+ /* Check that we can recover a wedged device with a GPU reset */
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ intel_gt_set_wedged(gt);
+
+ GEM_BUG_ON(!intel_gt_is_wedged(gt));
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
+}
+
+static int igt_atomic_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto unlock;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ intel_engine_mask_t awake;
+
+ GEM_TRACE("__intel_gt_reset under %s\n", p->name);
+
+ awake = reset_prepare(gt);
+ p->critical_section_begin();
+
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+
+ p->critical_section_end();
+ reset_finish(gt, awake);
+
+ if (err) {
+ pr_err("__intel_gt_reset failed under %s\n", p->name);
+ break;
+ }
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+
+unlock:
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+static int igt_atomic_engine_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto out_unlock;
+
+ for_each_engine(engine, gt, id) {
+ struct tasklet_struct *t = &engine->sched_engine->tasklet;
+
+ if (t->func)
+ tasklet_disable(t);
+ intel_engine_pm_get(engine);
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_engine_reset(%s) under %s\n",
+ engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
+
+ p->critical_section_begin();
+ err = __intel_engine_reset_bh(engine, NULL);
+ p->critical_section_end();
+
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
+
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed under %s\n",
+ engine->name, p->name);
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ if (t->func) {
+ tasklet_enable(t);
+ tasklet_hi_schedule(t);
+ }
+ if (err)
+ break;
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+
+out_unlock:
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+int intel_reset_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_reset_device_stolen),
+ SUBTEST(igt_reset_engines_stolen),
+ SUBTEST(igt_wedged_reset),
+ SUBTEST(igt_atomic_reset),
+ SUBTEST(igt_atomic_engine_reset),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
+ if (intel_gt_is_wedged(gt))
+ return -EIO; /* we're long past hope of a successful reset */
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring.c b/drivers/gpu/drm/i915/gt/selftest_ring.c
new file mode 100644
index 000000000..2a8c534dc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+static struct intel_ring *mock_ring(unsigned long sz)
+{
+ struct intel_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
+
+ intel_ring_update_space(ring);
+
+ return ring;
+}
+
+static void mock_ring_free(struct intel_ring *ring)
+{
+ kfree(ring);
+}
+
+static int check_ring_direction(struct intel_ring *ring,
+ u32 next, u32 prev,
+ int expected)
+{
+ int result;
+
+ result = intel_ring_direction(ring, next, prev);
+ if (result < 0)
+ result = -1;
+ else if (result > 0)
+ result = 1;
+
+ if (result != expected) {
+ pr_err("intel_ring_direction(%u, %u):%d != %d\n",
+ next, prev, result, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
+{
+ u32 prev = x, next = intel_ring_wrap(ring, x + step);
+ int err = 0;
+
+ err |= check_ring_direction(ring, next, next, 0);
+ err |= check_ring_direction(ring, prev, prev, 0);
+ err |= check_ring_direction(ring, next, prev, 1);
+ err |= check_ring_direction(ring, prev, next, -1);
+
+ return err;
+}
+
+static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
+{
+ int err = 0;
+
+ err |= check_ring_step(ring, x, step);
+ err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
+ err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);
+
+ return err;
+}
+
+static int igt_ring_direction(void *dummy)
+{
+ struct intel_ring *ring;
+ unsigned int half = 2048;
+ int step, err = 0;
+
+ ring = mock_ring(2 * half);
+ if (!ring)
+ return -ENOMEM;
+
+ GEM_BUG_ON(ring->size != 2 * half);
+
+ /* Precision of wrap detection is limited to ring->size / 2 */
+ for (step = 1; step < half; step <<= 1) {
+ err |= check_ring_offset(ring, 0, step);
+ err |= check_ring_offset(ring, half, step);
+ }
+ err |= check_ring_step(ring, 0, half - 64);
+
+ /* And check unwrapped handling for good measure */
+ err |= check_ring_offset(ring, 0, 2 * half + 64);
+ err |= check_ring_offset(ring, 3 * half, 1);
+
+ mock_ring_free(ring);
+ return err;
+}
+
+int intel_ring_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ring_direction),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000..70f9ac1ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
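+	/* Pick the store-dword variant for this gen, writing STACK_MAGIC to byte 4000 of the object */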
+ if (GRAPHICS_VER(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (GRAPHICS_VER(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm if the batch was run when we expect it,
+	 * and, equally important, that it was not run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GRAPHICS_VER(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING)
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
new file mode 100644
index 000000000..cfb4708dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -0,0 +1,1319 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/sort.h>
+
+#include "gem/i915_gem_internal.h"
+
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt_clock_utils.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_engine_heartbeat.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/librapl.h"
+
+/* Try to isolate the impact of cstates from determining frequency response */
+#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int cmp_u64(const void *A, const void *B)
+{
+ const u64 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static struct i915_vma *
+create_spin_counter(struct intel_engine_cs *engine,
+ struct i915_address_space *vm,
+ bool srm,
+ u32 **cancel,
+ u32 **counter)
+{
+ enum {
+ COUNT,
+ INC,
+ __NGPR__,
+ };
+#define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x)
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned long end;
+ u32 *base, *cs;
+ int loop, i;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, 64 << 10);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ end = obj->base.size / sizeof(u32) - 1;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unlock;
+
+ i915_vma_lock(vma);
+
+ base = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ goto err_unpin;
+ }
+ cs = base;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(__NGPR__ * 2);
+ for (i = 0; i < __NGPR__; i++) {
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i));
+ *cs++ = 0;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(i)) + 4;
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(INC));
+ *cs++ = 1;
+
+ loop = cs - base;
+
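+	/* Each iteration adds INC to COUNT via MI_MATH; with srm, COUNT is also dumped to the last dword of the buffer */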
+ /* Unroll the loop to avoid MI_BB_START stalls impacting measurements */
+ for (i = 0; i < 1024; i++) {
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(COUNT));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(INC));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(COUNT), MI_MATH_REG_ACCU);
+
+ if (srm) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
+ *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ }
+ }
+
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ GEM_BUG_ON(cs - base > end);
+
+ i915_gem_object_flush_map(obj);
+
+ *cancel = base + loop;
+ *counter = srm ? memset32(base + end, 0, 1) : NULL;
+ return vma;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_unlock:
+ i915_vma_unlock(vma);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
+{
+ u8 history[64], i;
+ unsigned long end;
+ int sleep;
+
+ i = 0;
+ memset(history, freq, sizeof(history));
+ sleep = 20;
+
+ /* The PCU does not change instantly, but drifts towards the goal? */
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 act;
+
+ act = read_cagf(rps);
+ if (time_after(jiffies, end))
+ return act;
+
+ /* Target acquired */
+ if (act == freq)
+ return act;
+
+ /* Any change within the last N samples? */
+ if (!memchr_inv(history, act, sizeof(history)))
+ return act;
+
+ history[i] = act;
+ i = (i + 1) % ARRAY_SIZE(history);
+
+ usleep_range(sleep, 2 * sleep);
+ sleep *= 2;
+ if (sleep > timeout_ms * 20)
+ sleep = timeout_ms * 20;
+ } while (1);
+}
+
+static u8 rps_set_check(struct intel_rps *rps, u8 freq)
+{
+ mutex_lock(&rps->lock);
+ GEM_BUG_ON(!intel_rps_is_active(rps));
+ if (wait_for(!intel_rps_set(rps, freq), 50)) {
+ mutex_unlock(&rps->lock);
+ return 0;
+ }
+ GEM_BUG_ON(rps->last_freq != freq);
+ mutex_unlock(&rps->lock);
+
+ return wait_for_freq(rps, freq, 50);
+}
+
+static void show_pstate_limits(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_BROXTON(i915)) {
+ pr_info("P_STATE_CAP[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(BXT_RP_STATE_CAP),
+ intel_uncore_read(rps_to_uncore(rps),
+ BXT_RP_STATE_CAP));
+ } else if (GRAPHICS_VER(i915) == 9) {
+ pr_info("P_STATE_LIMITS[%x]: 0x%08x\n",
+ i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS),
+ intel_uncore_read(rps_to_uncore(rps),
+ GEN9_RP_STATE_LIMITS));
+ }
+}
+
+int live_rps_clock_interval(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ intel_rps_disable(&gt->rps);
+
+ intel_gt_check_clock_frequency(gt);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ u32 cycles;
+ u64 dt;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ st_engine_heartbeat_enable(engine);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0);
+
+ /* Set the evaluation interval to infinity! */
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_EI, 0xffffffff);
+ intel_uncore_write_fw(gt->uncore,
+ GEN6_RP_UP_THRESHOLD, 0xffffffff);
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL,
+ GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG);
+
+ if (wait_for(intel_uncore_read_fw(gt->uncore,
+ GEN6_RP_CUR_UP_EI),
+ 10)) {
+ /* Just skip the test; assume lack of HW support */
+ pr_notice("%s: rps evaluation interval not ticking\n",
+ engine->name);
+ err = -ENODEV;
+ } else {
+ ktime_t dt_[5];
+ u32 cycles_[5];
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ preempt_disable();
+
+ dt_[i] = ktime_get();
+ cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ udelay(1000);
+
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
+ cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+
+ preempt_enable();
+ }
+
+ /* Use the median of both cycle/dt; close enough */
+ sort(cycles_, 5, sizeof(*cycles_), cmp_u32, NULL);
+ cycles = (cycles_[1] + 2 * cycles_[2] + cycles_[3]) / 4;
+ sort(dt_, 5, sizeof(*dt_), cmp_u64, NULL);
+ dt = div_u64(dt_[1] + 2 * dt_[2] + dt_[3], 4);
+ }
+
+ intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+
+ if (err == 0) {
+ u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
+ u32 expected =
+ intel_gt_ns_to_pm_interval(gt, dt);
+
+ pr_info("%s: rps counted %d C0 cycles [%lldns] in %lldns [%d cycles], using GT clock frequency of %uKHz\n",
+ engine->name, cycles, time, dt, expected,
+ gt->clock_frequency / 1000);
+
+ if (10 * time < 8 * dt ||
+ 8 * time > 10 * dt) {
+ pr_err("%s: rps clock time does not match walltime!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+ if (10 * expected < 8 * cycles ||
+ 8 * expected > 10 * cycles) {
+ pr_err("%s: walltime does not match rps clock ticks!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ break; /* once is enough */
+ }
+
+ intel_rps_enable(&gt->rps);
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (err == -ENODEV) /* skipped, don't report a fail */
+ err = 0;
+
+ return err;
+}
+
+int live_rps_control(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Check that the actual frequency matches our requested frequency,
+ * to verify our control mechanism. We have to be careful that the
+ * PCU may throttle the GPU in which case the actual frequency used
+	 * will be lower than requested.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ ktime_t min_dt, max_dt;
+ int f, limit;
+ int min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not set minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
+ if (rps_set_check(rps, f) < f)
+ break;
+ }
+
+ limit = rps_set_check(rps, f);
+
+ if (rps_set_check(rps, rps->min_freq) != rps->min_freq) {
+ pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
+ engine->name, rps->min_freq, read_cagf(rps));
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ show_pstate_limits(rps);
+ err = -EINVAL;
+ break;
+ }
+
+ max_dt = ktime_get();
+ max = rps_set_check(rps, limit);
+ max_dt = ktime_sub(ktime_get(), max_dt);
+
+ min_dt = ktime_get();
+ min = rps_set_check(rps, rps->min_freq);
+ min_dt = ktime_sub(ktime_get(), min_dt);
+
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+
+ pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
+ engine->name,
+ rps->min_freq, intel_gpu_freq(rps, rps->min_freq),
+ rps->max_freq, intel_gpu_freq(rps, rps->max_freq),
+ limit, intel_gpu_freq(rps, limit),
+ min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
+
+ if (limit == rps->min_freq) {
+ pr_err("%s: GPU throttled to minimum!\n",
+ engine->name);
+ show_pstate_limits(rps);
+ err = -ENODEV;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+ intel_gt_pm_put(gt);
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static void show_pcu_config(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq;
+
+ if (!HAS_LLC(i915))
+ return;
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (GRAPHICS_VER(i915) >= 9) {
+		/* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);
+
+ pr_info("%5s %5s %5s\n", "GPU", "eCPU", "eRing");
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ int ia_freq = gpu_freq;
+
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+
+ pr_info("%5d %5d %5d\n",
+ gpu_freq * 50,
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+
+ intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);
+}
+
+static u64 __measure_frequency(u32 *cntr, int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = READ_ONCE(*cntr);
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = READ_ONCE(*cntr) - dc;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_frequency(cntr, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
+ int duration_ms)
+{
+ u64 dc, dt;
+
+ dt = ktime_get();
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
+ dt = ktime_get() - dt;
+
+ return div64_u64(1000 * 1000 * dc, dt);
+}
+
+static u64 measure_cs_frequency_at(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_cs_frequency(engine, 2);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static bool scaled_within(u64 x, u64 y, u32 f_n, u32 f_d)
+{
+ return f_d * x > f_n * y && f_n * x < f_d * y;
+}
+
+int live_rps_frequency_cs(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ st_engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, false,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ st_engine_heartbeat_enable(engine);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)),
+ 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_cs_frequency_at(rps, engine, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_cs_frequency_at(rps, engine, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 2, 3)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_cs_frequency_at(rps, engine, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINTR; /* ignore error, continue on with test */
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_unlock(vma);
+ i915_vma_put(vma);
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+int live_rps_frequency_srm(void *arg)
+{
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * The premise is that the GPU does change frequency at our behest.
+ * Let's check there is a correspondence between the requested
+ * frequency, the actual frequency, and the observed clock rate.
+ */
+
+ if (!intel_rps_is_enabled(rps))
+ return 0;
+
+ if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */
+ return 0;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_add_request(&qos, CPU_LATENCY);
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cancel, *cntr;
+ struct {
+ u64 count;
+ int freq;
+ } min, max;
+
+ st_engine_heartbeat_disable(engine);
+
+ vma = create_spin_counter(engine,
+ engine->kernel_context->vm, true,
+ &cancel, &cntr);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ st_engine_heartbeat_enable(engine);
+ break;
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_request_add(rq);
+ if (err)
+ goto err_vma;
+
+ if (wait_for(READ_ONCE(*cntr), 10)) {
+ pr_err("%s: timed loop did not start\n",
+ engine->name);
+ goto err_vma;
+ }
+
+ min.freq = rps->min_freq;
+ min.count = measure_frequency_at(rps, cntr, &min.freq);
+
+ max.freq = rps->max_freq;
+ max.count = measure_frequency_at(rps, cntr, &max.freq);
+
+ pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n",
+ engine->name,
+ min.count, intel_gpu_freq(rps, min.freq),
+ max.count, intel_gpu_freq(rps, max.freq),
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count,
+ max.freq * min.count));
+
+ if (!scaled_within(max.freq * min.count,
+ min.freq * max.count,
+ 1, 2)) {
+ int f;
+
+ pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n",
+ engine->name,
+ max.freq * min.count,
+ min.freq * max.count);
+ show_pcu_config(rps);
+
+ for (f = min.freq + 1; f <= rps->max_freq; f++) {
+ int act = f;
+ u64 count;
+
+ count = measure_frequency_at(rps, cntr, &act);
+ if (act < f)
+ break;
+
+ pr_info("%s: %x:%uMHz: %lluKHz [%d%%]\n",
+ engine->name,
+ act, intel_gpu_freq(rps, act), count,
+ (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count,
+ act * min.count));
+
+ f = act; /* may skip ahead [pcu granularity] */
+ }
+
+ err = -EINTR; /* ignore error, continue on with test */
+ }
+
+err_vma:
+ *cancel = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_unpin(vma);
+ i915_vma_unlock(vma);
+ i915_vma_put(vma);
+
+ st_engine_heartbeat_enable(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ if (CPU_LATENCY >= 0)
+ cpu_latency_qos_remove_request(&qos);
+
+ return err;
+}
+
+static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
+{
+ /* Flush any previous EI */
+ usleep_range(timeout_us, 2 * timeout_us);
+
+ /* Reset the interrupt status */
+ rps_disable_interrupts(rps);
+ GEM_BUG_ON(rps->pm_iir);
+ rps_enable_interrupts(rps);
+
+ /* And then wait for the timeout, for real this time */
+ usleep_range(2 * timeout_us, 3 * timeout_us);
+}
+
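+/*
+ * Pin the GPU to its minimum frequency, keep the engine busy with a
+ * spinner for a full evaluation interval and check that the UP threshold
+ * interrupt is raised (the worker is stubbed out, so no reclocking occurs).
+ */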
+static int __rps_up_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ struct i915_request *rq;
+ u32 timeout;
+
+ if (!intel_engine_can_store_dword(engine))
+ return 0;
+
+ rps_set_check(rps, rps->min_freq);
+
+ rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ i915_request_put(rq);
+ intel_gt_set_wedged(engine->gt);
+ return -EIO;
+ }
+
+ if (!intel_rps_is_active(rps)) {
+ pr_err("%s: RPS not enabled on starting spinner\n",
+ engine->name);
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: RPS did not register UP interrupt\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->min_freq) {
+ pr_err("%s: RPS did not program min frequency\n",
+ engine->name);
+ i915_request_put(rq);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ igt_spinner_end(spin);
+ i915_request_put(rq);
+
+ if (rps->cur_freq != rps->min_freq) {
+ pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+ engine->name, intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+ pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __rps_down_interrupt(struct intel_rps *rps,
+ struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ u32 timeout;
+
+ rps_set_check(rps, rps->max_freq);
+
+ if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+ pr_err("%s: RPS did not register DOWN interrupt\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ if (rps->last_freq != rps->max_freq) {
+ pr_err("%s: RPS did not program max frequency\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+ timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout);
+ timeout = DIV_ROUND_UP(timeout, 1000);
+
+ sleep_for_ei(rps, timeout);
+
+ if (rps->cur_freq != rps->max_freq) {
+ pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+ engine->name,
+ intel_rps_read_actual_frequency(rps));
+ return -EINVAL;
+ }
+
+ if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+ pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+ engine->name, rps->pm_iir,
+ intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+ intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+ intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+ intel_uncore_read(uncore, GEN6_RP_UP_EI));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int live_rps_interrupt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ u32 pm_events;
+ int err = 0;
+
+ /*
+ * First, let's check whether we are receiving interrupts.
+ */
+
+ if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ intel_gt_pm_get(gt);
+ pm_events = rps->pm_events;
+ intel_gt_pm_put(gt);
+ if (!pm_events) {
+ pr_err("No RPS PM events registered, but RPS is enabled?\n");
+ return -ENODEV;
+ }
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ /* Keep the engine busy with a spinner; expect an UP! */
+ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+ intel_gt_pm_wait_for_idle(engine->gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+
+ st_engine_heartbeat_disable(engine);
+
+ err = __rps_up_interrupt(rps, engine, &spin);
+
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto out;
+
+ intel_gt_pm_wait_for_idle(engine->gt);
+ }
+
+ /* Keep the engine awake but idle and check for DOWN */
+ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+ st_engine_heartbeat_disable(engine);
+ intel_rc6_disable(&gt->rc6);
+
+ err = __rps_down_interrupt(rps, engine);
+
+ intel_rc6_enable(&gt->rc6);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+static u64 __measure_power(int duration_ms)
+{
+ u64 dE, dt;
+
+ dt = ktime_get();
+ dE = librapl_energy_uJ();
+ usleep_range(1000 * duration_ms, 2000 * duration_ms);
+ dE = librapl_energy_uJ() - dE;
+ dt = ktime_get() - dt;
+
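+ /* dE is in uJ and dt in ns, so this evaluates the mean power draw in mW */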
+ return div64_u64(1000 * 1000 * dE, dt);
+}
+
+static u64 measure_power_at(struct intel_rps *rps, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ *freq = rps_set_check(rps, *freq);
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_power(5);
+ *freq = (*freq + read_cagf(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+int live_rps_power(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ void (*saved_work)(struct work_struct *wrk);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * Our fundamental assumption is that running at a lower frequency
+ * actually saves power. Let's see if our RAPL measurements support
+ * that theory.
+ */
+
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ if (!librapl_supported(gt->i915))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ intel_gt_pm_wait_for_idle(gt);
+ saved_work = rps->work.func;
+ rps->work.func = dummy_rps_work;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct {
+ u64 power;
+ int freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ st_engine_heartbeat_enable(engine);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: RPS spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ max.freq = rps->max_freq;
+ max.power = measure_power_at(rps, &max.freq);
+
+ min.freq = rps->min_freq;
+ min.power = measure_power_at(rps, &min.freq);
+
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+
+ pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
+ engine->name,
+ min.power, intel_gpu_freq(rps, min.freq),
+ max.power, intel_gpu_freq(rps, max.freq));
+
+ if (10 * min.freq >= 9 * max.freq) {
+ pr_notice("Could not control frequency, ran at [%d:%uMHz, %d:%uMHz]\n",
+ min.freq, intel_gpu_freq(rps, min.freq),
+ max.freq, intel_gpu_freq(rps, max.freq));
+ continue;
+ }
+
+ if (11 * min.power > 10 * max.power) {
+ pr_err("%s: did not conserve power when setting lower frequency!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+
+ intel_gt_pm_wait_for_idle(gt);
+ rps->work.func = saved_work;
+
+ return err;
+}
+
+int live_rps_dynamic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We've looked at the basics, and have established that we
+ * can change the clock frequency and that the HW will generate
+ * interrupts based on load. Now we check how those moving parts
+ * integrate into dynamic reclocking under load.
+ */
+
+ if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ if (intel_rps_has_interrupts(rps))
+ pr_info("RPS has interrupt support\n");
+ if (intel_rps_uses_timer(rps))
+ pr_info("RPS has timer support\n");
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ struct {
+ ktime_t dt;
+ u8 freq;
+ } min, max;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ intel_gt_pm_wait_for_idle(gt);
+ GEM_BUG_ON(intel_rps_is_active(rps));
+ rps->cur_freq = rps->min_freq;
+
+ intel_engine_pm_get(engine);
+ intel_rc6_disable(&gt->rc6);
+ GEM_BUG_ON(rps->last_freq != rps->min_freq);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ max.dt = ktime_get();
+ max.freq = wait_for_freq(rps, rps->max_freq, 500);
+ max.dt = ktime_sub(ktime_get(), max.dt);
+
+ igt_spinner_end(&spin);
+
+ min.dt = ktime_get();
+ min.freq = wait_for_freq(rps, rps->min_freq, 2000);
+ min.dt = ktime_sub(ktime_get(), min.dt);
+
+ pr_info("%s: dynamically reclocked to %u:%uMHz while busy in %lluns, and %u:%uMHz while idle in %lluns\n",
+ engine->name,
+ max.freq, intel_gpu_freq(rps, max.freq),
+ ktime_to_ns(max.dt),
+ min.freq, intel_gpu_freq(rps, min.freq),
+ ktime_to_ns(min.dt));
+ if (min.freq >= max.freq) {
+ pr_err("%s: dynamic reclocking of spinner failed!\n",
+ engine->name);
+ err = -EINVAL;
+ }
+
+err:
+ intel_rc6_enable(&gt->rc6);
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.h b/drivers/gpu/drm/i915/gt/selftest_rps.h
new file mode 100644
index 000000000..6e82a631c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+int live_rps_control(void *arg);
+int live_rps_clock_interval(void *arg);
+int live_rps_frequency_cs(void *arg);
+int live_rps_frequency_srm(void *arg);
+int live_rps_power(void *arg);
+int live_rps_interrupt(void *arg);
+int live_rps_dynamic(void *arg);
+
+#endif /* SELFTEST_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
new file mode 100644
index 000000000..f8a1d27df
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#define NUM_STEPS 5
+#define H2G_DELAY 50000
+#define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
+#define FREQUENCY_REQ_UNIT DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
+ GEN9_FREQ_SCALER)
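+/*
+ * With GT_FREQUENCY_MULTIPLIER (50) and GEN9_FREQ_SCALER (3), this works
+ * out to DIV_ROUND_CLOSEST(50, 3) == 17 MHz, the granularity at which
+ * GuC rounds frequency requests (see the SWReq checks below).
+ */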
+enum test_type {
+ VARY_MIN,
+ VARY_MAX,
+ MAX_GRANTED
+};
+
+static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+ int ret;
+
+ ret = intel_guc_slpc_set_min_freq(slpc, freq);
+ if (ret)
+ pr_err("Could not set min frequency to [%u]\n", freq);
+ else /* Delay to ensure h2g completes */
+ delay_for_h2g();
+
+ return ret;
+}
+
+static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+ int ret;
+
+ ret = intel_guc_slpc_set_max_freq(slpc, freq);
+ if (ret)
+ pr_err("Could not set maximum frequency [%u]\n",
+ freq);
+ else /* Delay to ensure h2g completes */
+ delay_for_h2g();
+
+ return ret;
+}
+
+static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
+ u32 *max_act_freq)
+{
+ u32 step, max_freq, req_freq;
+ u32 act_freq;
+ int err = 0;
+
+ /* Go from max to min in 5 steps */
+ step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
+ *max_act_freq = slpc->min_freq;
+ for (max_freq = slpc->rp0_freq; max_freq > slpc->min_freq;
+ max_freq -= step) {
+ err = slpc_set_max_freq(slpc, max_freq);
+ if (err)
+ break;
+
+ req_freq = intel_rps_read_punit_req_frequency(rps);
+
+ /* GuC requests freq in multiples of 50/3 MHz */
+ if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
+ pr_err("SWReq is %d, should be at most %d\n", req_freq,
+ max_freq + FREQUENCY_REQ_UNIT);
+ err = -EINVAL;
+ }
+
+ act_freq = intel_rps_read_actual_frequency(rps);
+ if (act_freq > *max_act_freq)
+ *max_act_freq = act_freq;
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
+ u32 *max_act_freq)
+{
+ u32 step, min_freq, req_freq;
+ u32 act_freq;
+ int err = 0;
+
+ /* Go from min to max in 5 steps */
+ step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
+ *max_act_freq = slpc->min_freq;
+ for (min_freq = slpc->min_freq; min_freq < slpc->rp0_freq;
+ min_freq += step) {
+ err = slpc_set_min_freq(slpc, min_freq);
+ if (err)
+ break;
+
+ req_freq = intel_rps_read_punit_req_frequency(rps);
+
+ /* GuC requests freq in multiples of 50/3 MHz */
+ if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
+ pr_err("SWReq is %d, should be at least %d\n", req_freq,
+ min_freq - FREQUENCY_REQ_UNIT);
+ err = -EINVAL;
+ }
+
+ act_freq = intel_rps_read_actual_frequency(rps);
+ if (act_freq > *max_act_freq)
+ *max_act_freq = act_freq;
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ u32 perf_limit_reasons;
+ int err = 0;
+
+ err = slpc_set_min_freq(slpc, slpc->rp0_freq);
+ if (err)
+ return err;
+
+ *max_act_freq = intel_rps_read_actual_frequency(rps);
+ if (*max_act_freq != slpc->rp0_freq) {
+ /* Check if there was some throttling by pcode */
+ perf_limit_reasons = intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS);
+
+ /* If not, this is an error */
+ if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
+ pr_err("Pcode did not grant max freq\n");
+ err = -EINVAL;
+ } else {
+ pr_info("Pcode throttled frequency 0x%x\n", perf_limit_reasons);
+ }
+ }
+
+ return err;
+}
+
+static int run_test(struct intel_gt *gt, int test_type)
+{
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ struct intel_rps *rps = &gt->rps;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ u32 slpc_min_freq, slpc_max_freq;
+ int err = 0;
+
+ if (!intel_uc_uses_guc_slpc(&gt->uc))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
+ pr_err("Could not get SLPC max freq\n");
+ return -EIO;
+ }
+
+ if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
+ pr_err("Could not get SLPC min freq\n");
+ return -EIO;
+ }
+
+ /*
+ * FIXME: With efficient frequency enabled, GuC can request
+ * frequencies higher than the SLPC max. While this is fixed
+ * in GuC, we level set these tests with RPn as min.
+ */
+ err = slpc_set_min_freq(slpc, slpc->min_freq);
+ if (err)
+ return err;
+
+ if (slpc->min_freq == slpc->rp0_freq) {
+ pr_err("Min/Max are fused to the same value\n");
+ return -EINVAL;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq;
+ u32 max_act_freq;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ st_engine_heartbeat_disable(engine);
+
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ st_engine_heartbeat_enable(engine);
+ break;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("%s: Spinner did not start\n",
+ engine->name);
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ intel_gt_set_wedged(engine->gt);
+ err = -EIO;
+ break;
+ }
+
+ switch (test_type) {
+ case VARY_MIN:
+ err = vary_min_freq(slpc, rps, &max_act_freq);
+ break;
+
+ case VARY_MAX:
+ err = vary_max_freq(slpc, rps, &max_act_freq);
+ break;
+
+ case MAX_GRANTED:
+ /* Media engines have a different RP0 */
+ if (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS) {
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+ err = 0;
+ continue;
+ }
+
+ err = max_granted_freq(slpc, rps, &max_act_freq);
+ break;
+ }
+
+ pr_info("Max actual frequency for %s was %d\n",
+ engine->name, max_act_freq);
+
+ /* Actual frequency should rise above min */
+ if (max_act_freq <= slpc_min_freq) {
+ pr_err("Actual freq did not rise above min\n");
+ pr_err("Perf Limit Reasons: 0x%x\n",
+ intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS));
+ err = -EINVAL;
+ }
+
+ igt_spinner_end(&spin);
+ st_engine_heartbeat_enable(engine);
+
+ if (err)
+ break;
+ }
+
+ /* Restore min/max frequencies */
+ slpc_set_max_freq(slpc, slpc_max_freq);
+ slpc_set_min_freq(slpc, slpc_min_freq);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ intel_gt_pm_put(gt);
+ igt_spinner_fini(&spin);
+ intel_gt_pm_wait_for_idle(gt);
+
+ return err;
+}
+
+static int live_slpc_vary_min(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, VARY_MIN);
+}
+
+static int live_slpc_vary_max(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, VARY_MAX);
+}
+
+/* check if pcode can grant RP0 */
+static int live_slpc_max_granted(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = to_gt(i915);
+
+ return run_test(gt, MAX_GRANTED);
+}
+
+int intel_slpc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_slpc_vary_max),
+ SUBTEST(live_slpc_vary_min),
+ SUBTEST(live_slpc_max_granted),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
new file mode 100644
index 000000000..522d01905
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/string_helpers.h>
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
+
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/lib_sw_fence.h"
+#include "selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
+
+static struct page *hwsp_page(struct intel_timeline *tl)
+{
+ struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ return sg_page(obj->mm.pages->sgl);
+}
+
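+/*
+ * Compute a global index for the timeline's seqno slot (kernel address
+ * plus offset, in TIMELINE_SEQNO_BYTES units) so the radix tree in
+ * mock_hwsp_freelist can spot two timelines sharing a slot.
+ */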
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
+{
+ unsigned long address = (unsigned long)page_address(hwsp_page(tl));
+
+ return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
+}
+
+static int selftest_tl_pin(struct intel_timeline *tl)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(tl, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
+}
+
+/* Only half of the seqnos are usable, see __intel_timeline_get_seqno() */
+#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
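+/* e.g. assuming 8-byte seqno slots and a 4K page, that is 256 usable slots */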
+
+struct mock_hwsp_freelist {
+ struct intel_gt *gt;
+ struct radix_tree_root cachelines;
+ struct intel_timeline **history;
+ unsigned long count, max;
+ struct rnd_state prng;
+};
+
+enum {
+ SHUFFLE = BIT(0),
+};
+
+static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
+ unsigned int idx,
+ struct intel_timeline *tl)
+{
+ tl = xchg(&state->history[idx], tl);
+ if (tl) {
+ radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+ intel_timeline_unpin(tl);
+ intel_timeline_put(tl);
+ }
+}
+
+static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
+ unsigned int count,
+ unsigned int flags)
+{
+ struct intel_timeline *tl;
+ unsigned int idx;
+
+ while (count--) {
+ unsigned long cacheline;
+ int err;
+
+ tl = intel_timeline_create(state->gt);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ err = selftest_tl_pin(tl);
+ if (err) {
+ intel_timeline_put(tl);
+ return err;
+ }
+
+ cacheline = hwsp_cacheline(tl);
+ err = radix_tree_insert(&state->cachelines, cacheline, tl);
+ if (err) {
+ if (err == -EEXIST) {
+ pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
+ cacheline);
+ }
+ intel_timeline_unpin(tl);
+ intel_timeline_put(tl);
+ return err;
+ }
+
+ idx = state->count++ % state->max;
+ __mock_hwsp_record(state, idx, tl);
+ }
+
+ if (flags & SHUFFLE)
+ i915_prandom_shuffle(state->history,
+ sizeof(*state->history),
+ min(state->count, state->max),
+ &state->prng);
+
+ count = i915_prandom_u32_max_state(min(state->count, state->max),
+ &state->prng);
+ while (count--) {
+ idx = --state->count % state->max;
+ __mock_hwsp_record(state, idx, NULL);
+ }
+
+ return 0;
+}
+
+static int mock_hwsp_freelist(void *arg)
+{
+ struct mock_hwsp_freelist state;
+ struct drm_i915_private *i915;
+ const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "linear", 0 },
+ { "shuffled", SHUFFLE },
+ { },
+ }, *p;
+ unsigned int na;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
+ state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
+
+ state.gt = to_gt(i915);
+
+ /*
+ * Create a bunch of timelines and check that their HWSPs do not overlap.
+ * Free some, and try again.
+ */
+
+ state.max = PAGE_SIZE / sizeof(*state.history);
+ state.count = 0;
+ state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
+ if (!state.history) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ for (p = phases; p->name; p++) {
+ pr_debug("%s(%s)\n", __func__, p->name);
+ for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
+ err = __mock_hwsp_timeline(&state, na, p->flags);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ for (na = 0; na < state.max; na++)
+ __mock_hwsp_record(&state, na, NULL);
+ kfree(state.history);
+err_put:
+ mock_destroy_device(i915);
+ return err;
+}
+
+struct __igt_sync {
+ const char *name;
+ u32 seqno;
+ bool expected;
+ bool set;
+};
+
+static int __igt_sync(struct intel_timeline *tl,
+ u64 ctx,
+ const struct __igt_sync *p,
+ const char *name)
+{
+ int ret;
+
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
+ name, p->name, ctx, p->seqno, str_yes_no(p->expected));
+ return -EINVAL;
+ }
+
+ if (p->set) {
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int igt_sync(void *arg)
+{
+ const struct __igt_sync pass[] = {
+ { "unset", 0, false, false },
+ { "new", 0, false, true },
+ { "0a", 0, true, true },
+ { "1a", 1, false, true },
+ { "1b", 1, true, true },
+ { "0b", 0, true, false },
+ { "2a", 2, false, true },
+ { "4", 4, false, true },
+ { "INT_MAX", INT_MAX, false, true },
+ { "INT_MAX-1", INT_MAX-1, true, false },
+ { "INT_MAX+1", (u32)INT_MAX+1, false, true },
+ { "INT_MAX", INT_MAX, true, false },
+ { "UINT_MAX", UINT_MAX, false, true },
+ { "wrap", 0, false, true },
+ { "unwrap", UINT_MAX, true, false },
+ {},
+ }, *p;
+ struct intel_timeline tl;
+ int order, offset;
+ int ret = -ENODEV;
+
+ mock_timeline_init(&tl, 0);
+ for (p = pass; p->name; p++) {
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ ret = __igt_sync(&tl, ctx, p, "1");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+ mock_timeline_fini(&tl);
+
+ mock_timeline_init(&tl, 0);
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ for (p = pass; p->name; p++) {
+ ret = __igt_sync(&tl, ctx, p, "2");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+out:
+ mock_timeline_fini(&tl);
+ return ret;
+}
+
+static unsigned int random_engine(struct rnd_state *rnd)
+{
+ return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
+}
+
+static int bench_sync(void *arg)
+{
+ struct rnd_state prng;
+ struct intel_timeline tl;
+ unsigned long end_time, count;
+ u64 prng32_1M;
+ ktime_t kt;
+ int order, last_order;
+
+ mock_timeline_init(&tl, 0);
+
+ /* Lookups from cache are very fast and so the random number generation
+ * and the loop itself become significant factors in the per-iteration
+ * timings. We compensate by measuring the overhead of the prng and
+ * subtracting it from the reported results.
+ */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 x;
+
+ /* Make sure the compiler doesn't optimise away the prng call */
+ WRITE_ONCE(x, prandom_u32_state(&prng));
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_debug("%s: %lu random evaluations, %lluns/prng\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+ prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
+
+ /* Benchmark (only) setting random context ids */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ __intel_timeline_sync_set(&tl, id, 0);
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
+ mock_timeline_fini(&tl);
+ pr_err("Lookup of %llu failed\n", id);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ mock_timeline_init(&tl, 0);
+
+ /* Benchmark setting the first N (in order) contexts */
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ __intel_timeline_sync_set(&tl, count++, 0);
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
+ pr_err("Lookup of %lu failed\n", end_time);
+ mock_timeline_fini(&tl);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ mock_timeline_init(&tl, 0);
+
+ /* Benchmark searching for a random context id and maybe changing it */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 id = random_engine(&prng);
+ u32 seqno = prandom_u32_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+ __intel_timeline_sync_set(&tl, id, seqno);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_fini(&tl);
+ cond_resched();
+
+ /* Benchmark searching for a known context id and changing the seqno */
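+ /* order grows Fibonacci-style (1, 2, 3, 5, 8, 13, 21) to sample a
+ * spread of id strides without stepping through every power of two
+ */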
+ for (last_order = 1, order = 1; order < 32;
+ ({ int tmp = last_order; last_order = order; order += tmp; })) {
+ unsigned int mask = BIT(order) - 1;
+
+ mock_timeline_init(&tl, 0);
+
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ /* Without assuming too many details of the underlying
+ * implementation, try to identify its phase-changes
+ * (if any)!
+ */
+ u64 id = (u64)(count & mask) << order;
+
+ __intel_timeline_sync_is_later(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
+ __func__, count, order,
+ (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_fini(&tl);
+ cond_resched();
+ }
+
+ return 0;
+}
+
+int intel_timeline_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(mock_hwsp_freelist),
+ SUBTEST(igt_sync),
+ SUBTEST(bench_sync),
+ };
+
+ return i915_subtests(tests, NULL);
+}
+
+static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = 0;
+ *cs++ = value;
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ *cs++ = addr;
+ *cs++ = value;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = addr;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_request *
+checked_tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = selftest_tl_pin(tl);
+ if (err) {
+ rq = ERR_PTR(err);
+ goto out;
+ }
+
+ if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
+ pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+ *tl->hwsp_seqno, tl->seqno);
+ intel_timeline_unpin(tl);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ goto out_unpin;
+
+ i915_request_get(rq);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ rq = ERR_PTR(err);
+ }
+
+out_unpin:
+ intel_timeline_unpin(tl);
+out:
+ if (IS_ERR(rq))
+ pr_err("Failed to write to timeline!\n");
+ return rq;
+}
+
+static int live_hwsp_engine(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct intel_gt *gt = arg;
+ struct intel_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ count = 0;
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ tl = intel_timeline_create(gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ break;
+ }
+
+ rq = checked_tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ timelines[count++] = tl;
+ i915_request_put(rq);
+ }
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct intel_timeline *tl = timelines[n];
+
+ if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
+ n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ }
+ intel_timeline_put(tl);
+ }
+
+ kvfree(timelines);
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_alternate(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct intel_gt *gt = arg;
+ struct intel_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots, alternating
+ * between adjacent engines.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ count = 0;
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ for_each_engine(engine, gt, id) {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ tl = intel_timeline_create(gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ intel_engine_pm_get(engine);
+ rq = checked_tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ timelines[count++] = tl;
+ i915_request_put(rq);
+ }
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct intel_timeline *tl = timelines[n];
+
+ if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
+ n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ }
+ intel_timeline_put(tl);
+ }
+
+ kvfree(timelines);
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_wrap(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct intel_timeline *tl;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ tl = intel_timeline_create(gt);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ if (!tl->has_initial_breadcrumb)
+ goto out_free;
+
+ err = selftest_tl_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, gt, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ tl->seqno = -4u;
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_unlock(&tl->mutex);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_unlock(&tl->mutex);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
+ READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ intel_gt_retire_requests(gt); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ intel_timeline_unpin(tl);
+out_free:
+ intel_timeline_put(tl);
+ return err;
+}
+
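+/*
+ * Emit a (seqno, HWSP snapshot) dword pair into the watcher buffer;
+ * check_watcher() later walks these pairs to compare each snapshot
+ * against the seqno it was recorded for.
+ */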
+static int emit_read_hwsp(struct i915_request *rq,
+ u32 seqno, u32 hwsp,
+ u32 *addr)
+{
+ const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = *addr;
+ *cs++ = 0;
+ *cs++ = seqno;
+ *addr += 4;
+
+ *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = gpr;
+ *cs++ = hwsp;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = gpr;
+ *cs++ = *addr;
+ *cs++ = 0;
+ *addr += 4;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+struct hwsp_watcher {
+ struct i915_vma *vma;
+ struct i915_request *rq;
+ u32 addr;
+ u32 *map;
+};
+
+static bool cmp_lt(u32 a, u32 b)
+{
+ return a < b;
+}
+
+static bool cmp_gte(u32 a, u32 b)
+{
+ return a >= b;
+}
+
+static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(gt->i915, SZ_2M);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(w->map)) {
+ i915_gem_object_put(obj);
+ return PTR_ERR(w->map);
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return PTR_ERR(vma);
+ }
+
+ w->vma = vma;
+ w->addr = i915_ggtt_offset(vma);
+ return 0;
+}
+
+static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
+{
+ /* some light mutex juggling required; think co-routines */
+
+ if (from) {
+ lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
+ mutex_unlock(&from->context->timeline->mutex);
+ }
+
+ if (to) {
+ mutex_lock(&to->context->timeline->mutex);
+ to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
+ }
+}
+
+static int create_watcher(struct hwsp_watcher *w,
+ struct intel_engine_cs *engine,
+ int ringsz)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ce->ring_size = ringsz;
+ w->rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(w->rq))
+ return PTR_ERR(w->rq);
+
+ w->addr = i915_ggtt_offset(w->vma);
+
+ switch_tl_lock(w->rq, NULL);
+
+ return 0;
+}
+
+static int check_watcher(struct hwsp_watcher *w, const char *name,
+ bool (*op)(u32 hwsp, u32 seqno))
+{
+ struct i915_request *rq = fetch_and_zero(&w->rq);
+ u32 offset, end;
+ int err;
+
+ GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
+
+ i915_request_get(rq);
+ switch_tl_lock(NULL, rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ err = -ETIME;
+ goto out;
+ }
+
+ err = 0;
+ offset = 0;
+ end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map);
+ while (offset < end) {
+ if (!op(w->map[offset + 1], w->map[offset])) {
+ pr_err("Watcher '%s' found HWSP value %x for seqno %x\n",
+ name, w->map[offset + 1], w->map[offset]);
+ err = -EINVAL;
+ }
+
+ offset += 2;
+ }
+
+out:
+ i915_request_put(rq);
+ return err;
+}
+
+static void cleanup_watcher(struct hwsp_watcher *w)
+{
+ if (w->rq) {
+ switch_tl_lock(NULL, w->rq);
+
+ i915_request_add(w->rq);
+ }
+
+ i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP);
+}
+
+static bool retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ mutex_lock(&tl->mutex);
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ break;
+ mutex_unlock(&tl->mutex);
+
+ return !i915_active_fence_isset(&tl->last_request);
+}
+
+static struct i915_request *wrap_timeline(struct i915_request *rq)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_timeline *tl = ce->timeline;
+ u32 seqno = rq->fence.seqno;
+
+ while (tl->seqno >= seqno) { /* Cause a wrap */
+ i915_request_put(rq);
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ i915_request_put(rq);
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int live_hwsp_read(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct hwsp_watcher watcher[2] = {};
+ struct intel_engine_cs *engine;
+ struct intel_timeline *tl;
+ enum intel_engine_id id;
+ int err = 0;
+ int i;
+
+ /*
+ * If we take a reference to the HWSP for reading on the GPU, that
+ * read may be arbitrarily delayed (either by foreign fence or
+ * priority saturation) and a wrap can happen within 30 minutes.
+ * When the GPU read is finally submitted it should be correct,
+ * even across multiple wraps.
+ */
+
+ if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
+ return 0;
+
+ tl = intel_timeline_create(gt);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ if (!tl->has_initial_breadcrumb)
+ goto out_free;
+
+ for (i = 0; i < ARRAY_SIZE(watcher); i++) {
+ err = setup_watcher(&watcher[i], gt);
+ if (err)
+ goto out;
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long count = 0;
+ IGT_TIMEOUT(end_time);
+
+ /* Create a request we can use for remote reading of the HWSP */
+ err = create_watcher(&watcher[1], engine, SZ_512K);
+ if (err)
+ goto out;
+
+ do {
+ struct i915_sw_fence *submit;
+ struct i915_request *rq;
+ u32 hwsp, dummy;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = create_watcher(&watcher[0], engine, SZ_4K);
+ if (err)
+ goto out;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ ce->timeline = intel_timeline_get(tl);
+
+ /* Ensure timeline is mapped, done during first pin */
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ /*
+ * Start at a new wrap, and set seqno right before another wrap,
+ * saving 30 minutes of nops
+ */
+ tl->seqno = -12u + 2 * (count & 3);
+ __intel_timeline_get_seqno(tl, &dummy);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ &watcher[0].rq->fence, 0,
+ GFP_KERNEL);
+ if (err < 0) {
+ i915_request_add(rq);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ switch_tl_lock(rq, watcher[0].rq);
+ err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
+ if (err == 0)
+ err = emit_read_hwsp(watcher[0].rq, /* before */
+ rq->fence.seqno, hwsp,
+ &watcher[0].addr);
+ switch_tl_lock(watcher[0].rq, rq);
+ if (err) {
+ i915_request_add(rq);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ switch_tl_lock(rq, watcher[1].rq);
+ err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
+ if (err == 0)
+ err = emit_read_hwsp(watcher[1].rq, /* after */
+ rq->fence.seqno, hwsp,
+ &watcher[1].addr);
+ switch_tl_lock(watcher[1].rq, rq);
+ if (err) {
+ i915_request_add(rq);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ rq = wrap_timeline(rq);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
+ &rq->fence, 0,
+ GFP_KERNEL);
+ if (err < 0) {
+ i915_request_put(rq);
+ goto out;
+ }
+
+ err = check_watcher(&watcher[0], "before", cmp_lt);
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ if (err) {
+ i915_request_put(rq);
+ goto out;
+ }
+ count++;
+
+ /* Flush the timeline before manually wrapping again */
+ if (i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ HZ) < 0) {
+ err = -ETIME;
+ i915_request_put(rq);
+ goto out;
+ }
+ retire_requests(tl);
+ i915_request_put(rq);
+
+ /* Single requests are limited to half a ring at most */
+ if (8 * watcher[1].rq->ring->emit >
+ 3 * watcher[1].rq->ring->size)
+ break;
+
+ } while (!__igt_timeout(end_time, NULL) &&
+ count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
+
+ pr_info("%s: simulated %lu wraps\n", engine->name, count);
+ err = check_watcher(&watcher[1], "after", cmp_gte);
+ if (err)
+ goto out;
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(watcher); i++)
+ cleanup_watcher(&watcher[i]);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+out_free:
+ intel_timeline_put(tl);
+ return err;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Run the host for long enough, and even the kernel context will
+ * see a seqno rollover.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq[3] = {};
+ int i;
+
+ st_engine_heartbeat_disable(engine);
+ if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+ err = -EIO;
+ goto out;
+ }
+
+ GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
+ tl->seqno = -2u;
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = i915_request_create(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+ pr_debug("%s: create fence.seqno:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ st_engine_heartbeat_enable(engine);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Simulate a long running user context, and force the seqno wrap
+ * on the user's timeline.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq[3] = {};
+ struct intel_timeline *tl;
+ struct intel_context *ce;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_alloc_state(ce);
+ if (err)
+ goto out;
+
+ tl = ce->timeline;
+ if (!tl->has_initial_breadcrumb)
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto out;
+
+ tl->seqno = -4u;
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = intel_context_create_request(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out_unpin;
+ }
+
+ pr_debug("%s: create fence.seqno:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out_unpin;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out_unpin;
+ }
+ }
+out_unpin:
+ intel_context_unpin(ce);
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_hwsp_recycle(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ /*
+ * Check seqno writes into one timeline at a time. We expect to
+ * recycle the breadcrumb slot between iterations and want to confuse
+ * neither ourselves nor the GPU.
+ */
+
+ count = 0;
+ for_each_engine(engine, gt, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ do {
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+
+ tl = intel_timeline_create(gt);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ break;
+ }
+
+ rq = checked_tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ intel_timeline_put(tl);
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ i915_request_put(rq);
+ intel_timeline_put(tl);
+ err = -EIO;
+ break;
+ }
+
+ if (READ_ONCE(*tl->hwsp_seqno) != count) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
+ count, tl->fence_context,
+ tl->hwsp_offset, *tl->hwsp_seqno);
+ GEM_TRACE_DUMP();
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+ intel_timeline_put(tl);
+ count++;
+
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_hwsp_recycle),
+ SUBTEST(live_hwsp_engine),
+ SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
+ SUBTEST(live_hwsp_read),
+ SUBTEST(live_hwsp_rollover_kernel),
+ SUBTEST(live_hwsp_rollover_user),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
new file mode 100644
index 000000000..67a9aab80
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "i915_selftest.h"
+#include "intel_reset.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "selftests/mock_drm.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+static const struct wo_register {
+ enum intel_platform platform;
+ u32 reg;
+} wo_registers[] = {
+ { INTEL_GEMINILAKE, 0x731c }
+};
+
+struct wa_lists {
+ struct i915_wa_list gt_wa_list;
+ struct {
+ struct i915_wa_list wa_list;
+ struct i915_wa_list ctx_wa_list;
+ } engine[I915_NUM_ENGINES];
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static void
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ memset(lists, 0, sizeof(*lists));
+
+ wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
+ gt_init_workarounds(gt, &lists->gt_wa_list);
+ wa_init_finish(&lists->gt_wa_list);
+
+ for_each_engine(engine, gt, id) {
+ struct i915_wa_list *wal = &lists->engine[id].wa_list;
+
+ wa_init_start(wal, "REF", engine->name);
+ engine_init_workarounds(engine, wal);
+ wa_init_finish(wal);
+
+ __intel_engine_init_ctx_wa(engine,
+ &lists->engine[id].ctx_wa_list,
+ "CTX_REF");
+ }
+}
+
+static void
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_wa_list_free(&lists->engine[id].wa_list);
+
+ intel_wa_list_free(&lists->gt_wa_list);
+}
+
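+/*
+ * Submit a request that uses MI_STORE_REGISTER_MEM to copy each of the
+ * RING_FORCE_TO_NONPRIV (whitelist) slots into a freshly allocated internal
+ * object, which is returned to the caller for inspection from the CPU.
+ */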
+static struct drm_i915_gem_object *
+read_nonprivs(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ const u32 base = engine->mmio_base;
+ struct drm_i915_gem_object *result;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 srm, *cs;
+ int err;
+ int i;
+
+ result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(result))
+ return result;
+
+ i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
+
+ cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+ memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(result);
+ i915_gem_object_unpin_map(result);
+
+ vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_obj;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_pin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (GRAPHICS_VER(engine->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+ i915_vma_unpin(vma);
+
+ return result;
+
+err_req:
+ i915_request_add(rq);
+err_pin:
+ i915_vma_unpin(vma);
+err_obj:
+ i915_gem_object_put(result);
+ return ERR_PTR(err);
+}
+
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
+{
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
+}
+
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
+{
+ unsigned int i;
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ u32 expected = get_whitelist_reg(engine, i);
+ u32 actual = results[i];
+
+ pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
+ i, expected, actual);
+ }
+}
+
+static int check_whitelist(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct drm_i915_gem_object *results;
+ struct intel_wedge_me wedge;
+ u32 *vaddr;
+ int err;
+ int i;
+
+ results = read_nonprivs(ce);
+ if (IS_ERR(results))
+ return PTR_ERR(results);
+
+ err = 0;
+ i915_gem_object_lock(results, NULL);
+ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
+ err = i915_gem_object_set_to_cpu_domain(results, false);
+
+ if (intel_gt_is_wedged(engine->gt))
+ err = -EIO;
+ if (err)
+ goto out_put;
+
+ vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+ u32 expected = get_whitelist_reg(engine, i);
+ u32 actual = vaddr[i];
+
+ if (expected != actual) {
+ print_results(engine, vaddr);
+ pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
+ i, expected, actual);
+
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(results);
+out_put:
+ i915_gem_object_unlock(results);
+ i915_gem_object_put(results);
+ return err;
+}
+
+static int do_device_reset(struct intel_engine_cs *engine)
+{
+ intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
+ return 0;
+}
+
+static int do_engine_reset(struct intel_engine_cs *engine)
+{
+ return intel_engine_reset(engine, "live_workarounds");
+}
+
+static int do_guc_reset(struct intel_engine_cs *engine)
+{
+ /* Currently a no-op as the reset is handled by GuC */
+ return 0;
+}
+
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin,
+ struct i915_request **rq)
+{
+ struct intel_context *ce;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ *rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ intel_context_put(ce);
+
+ if (IS_ERR(*rq)) {
+ spin = NULL;
+ err = PTR_ERR(*rq);
+ goto err;
+ }
+
+ err = request_add_spin(*rq, spin);
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
+}
+
+static int check_whitelist_across_reset(struct intel_engine_cs *engine,
+ int (*reset)(struct intel_engine_cs *),
+ const char *name)
+{
+ struct intel_context *ce, *tmp;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ int err;
+
+ pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, engine->name, name);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = igt_spinner_init(&spin, engine->gt);
+ if (err)
+ goto out_ctx;
+
+ err = check_whitelist(ce);
+ if (err) {
+ pr_err("Invalid whitelist *before* %s reset!\n", name);
+ goto out_spin;
+ }
+
+ err = switch_to_scratch_context(engine, &spin, &rq);
+ if (err)
+ goto out_spin;
+
+ /* Ensure the spinner hasn't aborted */
+ if (i915_request_completed(rq)) {
+ pr_err("%s spinner failed to start\n", name);
+ err = -ETIMEDOUT;
+ goto out_spin;
+ }
+
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
+ err = reset(engine);
+
+ /* Ensure the reset happens and kills the engine */
+ if (err == 0)
+ err = intel_selftest_wait_for_rq(rq);
+
+ igt_spinner_end(&spin);
+
+ if (err) {
+ pr_err("%s reset failed\n", name);
+ goto out_spin;
+ }
+
+ err = check_whitelist(ce);
+ if (err) {
+ pr_err("Whitelist not preserved in context across %s reset!\n",
+ name);
+ goto out_spin;
+ }
+
+ tmp = intel_context_create(engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out_spin;
+ }
+ intel_context_put(ce);
+ ce = tmp;
+
+ err = check_whitelist(ce);
+ if (err) {
+ pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
+ name);
+ goto out_spin;
+ }
+
+out_spin:
+ igt_spinner_fini(&spin);
+out_ctx:
+ intel_context_put(ce);
+ return err;
+}
+
+static struct i915_vma *create_batch(struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
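+/*
+ * Model the value we expect to read back after an LRI to a whitelisted
+ * register: a readback of 0x0000ffff after writing 0xffffffff is taken to
+ * mean a masked register, where the upper 16 bits of a write act as per-bit
+ * write enables for the lower 16 bits; otherwise @rsvd is the mask of
+ * writable bits.
+ */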
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+ if (rsvd == 0x0000ffff) {
+ old &= ~(new >> 16);
+ old |= new & (new >> 16);
+ } else {
+ old &= ~rsvd;
+ old |= new & rsvd;
+ }
+
+ return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+ enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+ int i;
+
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_WR)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+ if (wo_registers[i].platform == platform &&
+ wo_registers[i].reg == reg)
+ return true;
+ }
+
+ return false;
+}
+
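+/*
+ * Timestamp registers (identified by their offset from the engine's mmio
+ * base) auto-increment, so their contents cannot be compared against the
+ * values we write in check_dirty_whitelist().
+ */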
+static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
+{
+ reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ switch (reg) {
+ case 0x358:
+ case 0x35c:
+ case 0x3a8:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool ro_register(u32 reg)
+{
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
+ return true;
+
+ return false;
+}
+
+static int whitelist_writable_count(struct intel_engine_cs *engine)
+{
+ int count = engine->whitelist.count;
+ int i;
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ count--;
+ }
+
+ return count;
+}
+
+static int check_dirty_whitelist(struct intel_context *ce)
+{
+ const u32 values[] = {
+ 0x00000000,
+ 0x01010101,
+ 0x10100101,
+ 0x03030303,
+ 0x30300303,
+ 0x05050505,
+ 0x50500505,
+ 0x0f0f0f0f,
+ 0xf00ff00f,
+ 0x10101010,
+ 0xf0f01010,
+ 0x30303030,
+ 0xa0a03030,
+ 0x50505050,
+ 0xc0c05050,
+ 0xf0f0f0f0,
+ 0x11111111,
+ 0x33333333,
+ 0x55555555,
+ 0x0000ffff,
+ 0x00ff00ff,
+ 0xff0000ff,
+ 0xffff00ff,
+ 0xffffffff,
+ };
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_vma *scratch;
+ struct i915_vma *batch;
+ int err = 0, i, v, sz;
+ u32 *cs, *results;
+
+ sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
+ scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ batch = create_batch(ce->vm);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_scratch;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ struct i915_gem_ww_ctx ww;
+ u64 addr = scratch->node.start;
+ struct i915_request *rq;
+ u32 srm, lrm, rsvd;
+ u32 expect;
+ int idx;
+ bool ro_reg;
+
+ if (wo_register(engine, reg))
+ continue;
+
+ if (timestamp(engine, reg))
+ continue; /* timestamps are expected to autoincrement */
+
+ ro_reg = ro_register(reg);
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ cs = NULL;
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(batch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_ctx;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_unmap_batch;
+ }
+
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
+ srm = MI_STORE_REGISTER_MEM;
+ lrm = MI_LOAD_REGISTER_MEM;
+ if (GRAPHICS_VER(engine->i915) >= 8)
+ lrm++, srm++;
+
+ pr_debug("%s: Writing garbage to %x\n",
+ engine->name, reg);
+
+ /* SRM original */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = ~values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+ /* LRM original -- don't leave garbage in the context! */
+ *cs++ = lrm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+ intel_gt_chipset_flush(engine->gt);
+ cs = NULL;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unmap_scratch;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(scratch, rq,
+ EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_request;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ 0);
+ if (err)
+ goto err_request;
+
+err_request:
+ err = request_add_sync(rq, err);
+ if (err) {
+ pr_err("%s: Futzing %x timedout; cancelling test\n",
+ engine->name, reg);
+ intel_gt_set_wedged(engine->gt);
+ goto out_unmap_scratch;
+ }
+
+ GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
+ if (!ro_reg) {
+ /* detect write masking */
+ rsvd = results[ARRAY_SIZE(values)];
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unmap_scratch;
+ }
+ } else {
+ rsvd = 0;
+ }
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, values[v], rsvd);
+
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, ~values[v], rsvd);
+
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ if (err) {
+ pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
+ engine->name, err, reg);
+
+ if (ro_reg)
+ pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
+ engine->name, reg, results[0]);
+ else
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = values[v];
+
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = ~values[v];
+
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+
+ err = -EINVAL;
+ }
+out_unmap_scratch:
+ i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+ if (cs)
+ i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(engine->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Can the user write to the whitelisted registers? */
+
+ if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ int err;
+
+ if (engine->whitelist.count == 0)
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = check_dirty_whitelist(ce);
+ intel_context_put(ce);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int live_reset_whitelist(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* If we reset the gpu, we should not lose the RING_NONPRIV */
+ igt_global_reset_lock(gt);
+
+ for_each_engine(engine, gt, id) {
+ if (engine->whitelist.count == 0)
+ continue;
+
+ if (intel_has_reset_engine(gt)) {
+ if (intel_engine_uses_guc(engine)) {
+ struct intel_selftest_saved_policy saved;
+ int err2;
+
+ err = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+ if (err)
+ goto out;
+
+ err = check_whitelist_across_reset(engine,
+ do_guc_reset,
+ "guc");
+
+ err2 = intel_selftest_restore_policy(engine, &saved);
+ if (err == 0)
+ err = err2;
+ } else {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset,
+ "engine");
+ }
+
+ if (err)
+ goto out;
+ }
+
+ if (intel_has_gpu_reset(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset,
+ "device");
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ igt_global_reset_unlock(gt);
+ return err;
+}
+
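+/*
+ * Submit and wait for a request that uses SRM to dump the current value of
+ * every whitelisted register on @ce's engine into the @results object.
+ */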
+static int read_whitelisted_registers(struct intel_context *ce,
+ struct i915_vma *results)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq;
+ int i, err = 0;
+ u32 srm, *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(results);
+ err = i915_request_await_object(rq, results->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(results);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (GRAPHICS_VER(engine->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ return request_add_sync(rq, err);
+}
+
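+/*
+ * Build and run an unprivileged batch that uses LRI to write 0xffffffff to
+ * every writable whitelisted register on @ce's engine, then wait for it to
+ * complete.
+ */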
+static int scrub_whitelisted_registers(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err = 0;
+ u32 *cs;
+
+ batch = create_batch(ce->vm);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ continue;
+
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
+ *cs++ = reg;
+ *cs++ = 0xffffffff;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ intel_gt_chipset_flush(engine->gt);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
+ /* Perform the writes from an unprivileged "user" batch */
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ err = request_add_sync(rq, err);
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+struct regmask {
+ i915_reg_t reg;
+ u8 graphics_ver;
+};
+
+static bool find_reg(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct regmask *tbl,
+ unsigned long count)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ while (count--) {
+ if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
+ i915_mmio_reg_offset(tbl->reg) == offset)
+ return true;
+ tbl++;
+ }
+
+ return false;
+}
+
+static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists. Mistakes already made */
+ static const struct regmask pardon[] = {
+ { GEN9_CTX_PREEMPT_REG, 9 },
+ { GEN8_L3SQCREG4, 9 },
+ };
+
+ return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
+}
+
+static bool result_eq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a != b && !pardon_reg(engine->i915, reg)) {
+ pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i915_mmio_reg_offset(reg), a, b);
+ return false;
+ }
+
+ return true;
+}
+
+static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Some registers do not behave as expected and our writes are unreadable */
+ static const struct regmask wo[] = {
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
+ };
+
+ return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
+}
+
+static bool result_neq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a == b && !writeonly_reg(engine->i915, reg)) {
+ pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
+ i915_mmio_reg_offset(reg), a);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+check_whitelisted_registers(struct intel_engine_cs *engine,
+ struct i915_vma *A,
+ struct i915_vma *B,
+ bool (*fn)(struct intel_engine_cs *engine,
+ u32 a, u32 b,
+ i915_reg_t reg))
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ const struct i915_wa *wa = &engine->whitelist.list[i];
+
+ if (i915_mmio_reg_offset(wa->reg) &
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
+ continue;
+
+ if (!fn(engine, a[i], b[i], wa->reg))
+ err = -EINVAL;
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct {
+ struct i915_vma *scratch[2];
+ } client[2] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+ * Check that a write into a whitelist register works, but
+ * is invisible to a second context.
+ */
+
+ if (!intel_engines_has_context_isolation(gt->i915))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ client[i].scratch[0] =
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
+ if (IS_ERR(client[i].scratch[0])) {
+ err = PTR_ERR(client[i].scratch[0]);
+ goto err;
+ }
+
+ client[i].scratch[1] =
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
+ if (IS_ERR(client[i].scratch[1])) {
+ err = PTR_ERR(client[i].scratch[1]);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ goto err;
+ }
+ }
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2];
+
+ if (!engine->kernel_context->vm)
+ continue;
+
+ if (!whitelist_writable_count(engine))
+ continue;
+
+ ce[0] = intel_context_create(engine);
+ if (IS_ERR(ce[0])) {
+ err = PTR_ERR(ce[0]);
+ break;
+ }
+ ce[1] = intel_context_create(engine);
+ if (IS_ERR(ce[1])) {
+ err = PTR_ERR(ce[1]);
+ intel_context_put(ce[0]);
+ break;
+ }
+
+ /* Read default values */
+ err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
+ if (err)
+ goto err_ce;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(ce[0]);
+ if (err)
+ goto err_ce;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
+ if (err)
+ goto err_ce;
+
+ /* Verify that both reads return the same default values */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[1].scratch[0],
+ result_eq);
+ if (err)
+ goto err_ce;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
+ if (err)
+ goto err_ce;
+
+ /* User should be granted privilege to overwrite regs */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[0].scratch[1],
+ result_neq);
+err_ce:
+ intel_context_put(ce[1]);
+ intel_context_put(ce[0]);
+ if (err)
+ break;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static bool
+verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
+ const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= wa_list_verify(gt, &lists->gt_wa_list, str);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return false;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].wa_list,
+ str) == 0;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].ctx_wa_list,
+ str) == 0;
+
+ intel_context_put(ce);
+ }
+
+ return ok;
+}
+
+static int
+live_gpu_reset_workarounds(void *arg)
+{
+ struct intel_gt *gt = arg;
+ intel_wakeref_t wakeref;
+ struct wa_lists *lists;
+ bool ok;
+
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
+ lists = kzalloc(sizeof(*lists), GFP_KERNEL);
+ if (!lists)
+ return -ENOMEM;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ reference_lists_init(gt, lists);
+
+ ok = verify_wa_lists(gt, lists, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
+
+ ok = verify_wa_lists(gt, lists, "after reset");
+
+out:
+ reference_lists_fini(gt, lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+ kfree(lists);
+
+ return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_workarounds(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct wa_lists *lists;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ lists = kzalloc(sizeof(*lists), GFP_KERNEL);
+ if (!lists)
+ return -ENOMEM;
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ reference_lists_init(gt, lists);
+
+ for_each_engine(engine, gt, id) {
+ struct intel_selftest_saved_policy saved;
+ bool using_guc = intel_engine_uses_guc(engine);
+ bool ok;
+ int ret2;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+ ret = intel_selftest_modify_policy(engine, &saved,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+ if (ret)
+ break;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto restore;
+ }
+
+ if (!using_guc) {
+ ok = verify_wa_lists(gt, lists, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = intel_engine_reset(engine, "live_workarounds:idle");
+ if (ret) {
+ pr_err("%s: Reset failed while idle\n", engine->name);
+ goto err;
+ }
+
+ ok = verify_wa_lists(gt, lists, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret)
+ goto err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ goto err;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ pr_err("%s: Spinner failed to start\n", engine->name);
+ igt_spinner_fini(&spin);
+ goto err;
+ }
+
+ /* Ensure the spinner hasn't aborted */
+ if (i915_request_completed(rq)) {
+ ret = -ETIMEDOUT;
+ goto skip;
+ }
+
+ if (!using_guc) {
+ ret = intel_engine_reset(engine, "live_workarounds:active");
+ if (ret) {
+ pr_err("%s: Reset failed on an active spinner\n",
+ engine->name);
+ igt_spinner_fini(&spin);
+ goto err;
+ }
+ }
+
+ /* Ensure the reset happens and kills the engine */
+ if (ret == 0)
+ ret = intel_selftest_wait_for_rq(rq);
+
+skip:
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_wa_lists(gt, lists, "after busy reset");
+ if (!ok)
+ ret = -ESRCH;
+
+err:
+ intel_context_put(ce);
+
+restore:
+ ret2 = intel_selftest_restore_policy(engine, &saved);
+ if (ret == 0)
+ ret = ret2;
+ if (ret)
+ break;
+ }
+
+ reference_lists_fini(gt, lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+ kfree(lists);
+
+ igt_flush_test(gt->i915);
+
+ return ret;
+}
+
+int intel_workarounds_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_dirty_whitelist),
+ SUBTEST(live_reset_whitelist),
+ SUBTEST(live_isolated_whitelist),
+ SUBTEST(live_gpu_reset_workarounds),
+ SUBTEST(live_engine_reset_workarounds),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
new file mode 100644
index 000000000..aeb1d1f61
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include "../intel_timeline.h"
+
+#include "mock_timeline.h"
+
+void mock_timeline_init(struct intel_timeline *timeline, u64 context)
+{
+ timeline->gt = NULL;
+ timeline->fence_context = context;
+
+ mutex_init(&timeline->mutex);
+
+ INIT_ACTIVE_FENCE(&timeline->last_request);
+ INIT_LIST_HEAD(&timeline->requests);
+
+ i915_syncmap_init(&timeline->sync);
+
+ INIT_LIST_HEAD(&timeline->link);
+}
+
+void mock_timeline_fini(struct intel_timeline *timeline)
+{
+ i915_syncmap_free(&timeline->sync);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
new file mode 100644
index 000000000..d2bcc3df6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#ifndef __MOCK_TIMELINE__
+#define __MOCK_TIMELINE__
+
+#include <linux/types.h>
+
+struct intel_timeline;
+
+void mock_timeline_init(struct intel_timeline *timeline, u64 context);
+void mock_timeline_fini(struct intel_timeline *timeline);
+
+#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644
index 000000000..e7e96d707
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -0,0 +1,46 @@
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files present in the
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any modification
+needs to be made to their respective ASM files, and the build instructions below
+need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+IGT GPU tools scripts and Mesa's i965 instruction assembler tool are used
+for building.
+
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from IGT source root directory"
+
+The instructions below assume:
+ * IGT GPU tools source code is located in your home directory (~) as ~/igt
+ * Mesa source code is located in your home directory (~) as ~/mesa
+ and built under the ~/mesa/build directory
+ * Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+ -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
 -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644
index 000000000..5fdf384bb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644
index 000000000..97c7ac9e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout :
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
new file mode 100644
index 000000000..402f085f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/iosys-map.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_lmem.h"
+#include "shmem_utils.h"
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len)
+{
+ struct file *file;
+ int err;
+
+ file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
+ if (IS_ERR(file))
+ return file;
+
+ err = shmem_write(file, 0, data, len);
+ if (err) {
+ fput(file);
+ return ERR_PTR(err);
+ }
+
+ return file;
+}
+
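+/*
+ * Return a shmem file backing @obj: if the object is already shmem backed,
+ * reuse its file (taking an extra reference); otherwise copy the object's
+ * contents into a new anonymous shmem file.
+ */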
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
+{
+ struct file *file;
+ void *ptr;
+
+ if (i915_gem_object_is_shmem(obj)) {
+ file = obj->base.filp;
+ atomic_long_inc(&file->f_count);
+ return file;
+ }
+
+ ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
+ I915_MAP_WC : I915_MAP_WB);
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ file = shmem_create_from_data("", ptr, obj->base.size);
+ i915_gem_object_unpin_map(obj);
+
+ return file;
+}
+
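+/*
+ * Grab every page of the shmem file, vmap them into a single contiguous
+ * kernel address range and mark the mapping unevictable. Release with
+ * shmem_unpin_map(). Returns NULL on failure.
+ */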
+void *shmem_pin_map(struct file *file)
+{
+ struct page **pages;
+ size_t n_pages, i;
+ void *vaddr;
+
+ n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ for (i = 0; i < n_pages; i++) {
+ pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+ GFP_KERNEL);
+ if (IS_ERR(pages[i]))
+ goto err_page;
+ }
+
+ vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ if (!vaddr)
+ goto err_page;
+ mapping_set_unevictable(file->f_mapping);
+ return vaddr;
+err_page:
+ while (i--)
+ put_page(pages[i]);
+ kvfree(pages);
+ return NULL;
+}
+
+void shmem_unpin_map(struct file *file, void *ptr)
+{
+ mapping_clear_unevictable(file->f_mapping);
+ vfree(ptr);
+}
+
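+/*
+ * Copy @len bytes between @ptr and the shmem file, starting at @off, one
+ * page at a time through a temporary kmap().
+ */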
+static int __shmem_rw(struct file *file, loff_t off,
+ void *ptr, size_t len,
+ bool write)
+{
+ unsigned long pfn;
+
+ for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+ unsigned int this =
+ min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+ struct page *page;
+ void *vaddr;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap(page);
+ if (write) {
+ memcpy(vaddr + offset_in_page(off), ptr, this);
+ set_page_dirty(page);
+ } else {
+ memcpy(ptr, vaddr + offset_in_page(off), this);
+ }
+ mark_page_accessed(page);
+ kunmap(page);
+ put_page(page);
+
+ len -= this;
+ ptr += this;
+ off = 0;
+ }
+
+ return 0;
+}
+
+int shmem_read_to_iosys_map(struct file *file, loff_t off,
+ struct iosys_map *map, size_t map_off, size_t len)
+{
+ unsigned long pfn;
+
+ for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+ unsigned int this =
+ min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+ struct page *page;
+ void *vaddr;
+
+ page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+ GFP_KERNEL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap(page);
+ iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
+ this);
+ mark_page_accessed(page);
+ kunmap(page);
+ put_page(page);
+
+ len -= this;
+ map_off += this;
+ off = 0;
+ }
+
+ return 0;
+}
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
+{
+ return __shmem_rw(file, off, dst, len, false);
+}
+
+int shmem_write(struct file *file, loff_t off, void *src, size_t len)
+{
+ return __shmem_rw(file, off, src, len, true);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_shmem_utils.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.h b/drivers/gpu/drm/i915/gt/shmem_utils.h
new file mode 100644
index 000000000..b2b04d88c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SHMEM_UTILS_H
+#define SHMEM_UTILS_H
+
+#include <linux/types.h>
+
+struct iosys_map;
+struct drm_i915_gem_object;
+struct file;
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len);
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);
+
+void *shmem_pin_map(struct file *file);
+void shmem_unpin_map(struct file *file, void *ptr);
+
+int shmem_read_to_iosys_map(struct file *file, loff_t off,
+ struct iosys_map *map, size_t map_off, size_t len);
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
+int shmem_write(struct file *file, loff_t off, void *src, size_t len);
+
+#endif /* SHMEM_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gt/st_shmem_utils.c b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
new file mode 100644
index 000000000..b279fe88b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/* Just a quick and casual check of the shmem_utils API */
+
+static int igt_shmem_basic(void *ignored)
+{
+ u32 datum = 0xdeadbeef, result;
+ struct file *file;
+ u32 *map;
+ int err;
+
+ file = shmem_create_from_data("mock", &datum, sizeof(datum));
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ result = 0;
+ err = shmem_read(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ if (result != datum) {
+ pr_err("Incorrect read back from shmemfs: %x != %x\n",
+ result, datum);
+ err = -EINVAL;
+ goto out_file;
+ }
+
+ result = 0xc0ffee;
+ err = shmem_write(file, 0, &result, sizeof(result));
+ if (err)
+ goto out_file;
+
+ map = shmem_pin_map(file);
+ if (!map) {
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ if (*map != result) {
+ pr_err("Incorrect read back via mmap of last write: %x != %x\n",
+ *map, result);
+ err = -EINVAL;
+ goto out_map;
+ }
+
+out_map:
+ shmem_unpin_map(file, map);
+out_file:
+ fput(file);
+ return err;
+}
+
+int shmem_utils_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shmem_basic),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000..f2d9858d8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ unsigned long caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_LONG);
+
+ len = 0;
+ for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration, clamped;
+ int err;
+
+ /*
+ * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if it is already
+ * executing then there is a good chance that it will complete
+ * before we can setup the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep, by first spinning
+ * on the request -- if it completed in less time than it would take
+ * to go to sleep, process the interrupt and return to the client,
+ * then we have saved the client some latency, albeit at the cost
+ * of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is unlikely
+ * to complete, deciding how long it is worth spinning is for is an
+ * arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
+ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_def =
+__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration, clamped;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+	 * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ clamped = intel_clamp_timeslice_duration_ms(engine, duration);
+ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_def =
+__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration, clamped;
+ int err;
+
+ /*
+	 * Allowing ourselves to sleep before a GPU reset after disabling
+	 * submission, even for a few milliseconds, gives an innocent context
+ * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ clamped = intel_clamp_stop_timeout_ms(engine, duration);
+ if (duration != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_def =
+__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout, clamped;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
+ if (timeout != clamped)
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_def =
+__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay, clamped;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
+ if (delay != clamped)
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
+static ssize_t
+heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_def =
+__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
+
+static void add_defaults(struct kobj_engine *parent)
+{
+ static const struct attribute *files[] = {
+ &max_spin_def.attr,
+ &stop_timeout_def.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_def.attr,
+#endif
+ NULL
+ };
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = parent->engine;
+
+ if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
+ kobject_put(&ke->base);
+ return;
+ }
+
+ if (sysfs_create_files(&ke->base, files))
+ return;
+
+ if (intel_engine_has_timeslices(ke->engine) &&
+ sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
+ return;
+
+ if (intel_engine_has_preempt_reset(ke->engine) &&
+ sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
+ return;
+}
+
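+/**
+ * intel_engines_add_sysfs - expose per-engine tunables in sysfs
+ * @i915: the i915 device
+ *
+ * Create an "engine/<name>/" directory under the DRM card device for each
+ * user-visible engine and populate it with the attributes defined above,
+ * plus a ".defaults/" subdirectory holding the driver defaults. As a purely
+ * illustrative example (the card index depends on the system), the render
+ * engine's heartbeat interval would typically show up as
+ * /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms, with the original
+ * default preserved in engine/rcs0/.defaults/heartbeat_interval_ms.
+ */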
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
+ add_defaults(container_of(kobj, struct kobj_engine, base));
+
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000..9546fffe0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
new file mode 100644
index 000000000..29ef8afc8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ACTIONS_ABI_H
+#define _ABI_GUC_ACTIONS_ABI_H
+
+/**
+ * DOC: HOST2GUC_SELF_CFG
+ *
+ * This message is used by Host KMD to set up the `GuC Self Config KLVs`_.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SELF_CFG` = 0x0508 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:16 | **KLV_KEY** - KLV key, see `GuC Self Config KLVs`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **KLV_LEN** - KLV length |
+ * | | | |
+ * | | | - 32 bit KLV = 1 |
+ * | | | - 64 bit KLV = 2 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **VALUE32** - Bits 31-0 of the KLV value |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **VALUE64** - Bits 63-32 of the KLV value (**KLV_LEN** = 2) |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = **NUM** - 1 if KLV was parsed, 0 if not recognized |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_SELF_CFG 0x0508
+
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffU << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_0_NUM GUC_HXG_RESPONSE_MSG_0_DATA0
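+
+/*
+ * Example (an illustrative sketch, not part of the ABI itself): using the
+ * masks above, a host driver could assemble the four dwords of a
+ * HOST2GUC_SELF_CFG request for a 64-bit KLV with FIELD_PREP() from
+ * linux/bitfield.h:
+ *
+ *	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+ *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ *			   GUC_ACTION_HOST2GUC_SELF_CFG),
+ *		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+ *		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, 2),
+ *		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
+ *			   lower_32_bits(value)),
+ *		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
+ *			   upper_32_bits(value)),
+ *	};
+ *
+ * where "key" and "value" are placeholders for the KLV being configured.
+ */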
+
+/**
+ * DOC: HOST2GUC_CONTROL_CTB
+ *
+ * This H2G action allows the VF Host to enable or disable H2G and G2H `CT Buffer`_.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_CONTROL_CTB` = 0x4509 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **CONTROL** - control `CTB based communication`_ |
+ * | | | |
+ * | | | - _`GUC_CTB_CONTROL_DISABLE` = 0 |
+ * | | | - _`GUC_CTB_CONTROL_ENABLE` = 1 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_CONTROL_CTB 0x4509
+
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL GUC_HXG_REQUEST_MSG_n_DATAn
+#define GUC_CTB_CONTROL_DISABLE 0u
+#define GUC_CTB_CONTROL_ENABLE 1u
+
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/* legacy definitions */
+
+enum intel_guc_action {
+ INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+ INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+ INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+ INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+ INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ INTEL_GUC_ACTION_SCHED_CONTEXT = 0x1000,
+ INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
+ INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
+ INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
+ INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
+ INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
+ INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
+ INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
+ INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_GET_HWCONFIG = 0x4100,
+ INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
+ INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
+ INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+ INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
+ INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
+ INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
+ INTEL_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
+ INTEL_GUC_ACTION_LIMIT
+};
+
+enum intel_guc_rc_options {
+ INTEL_GUCRC_HOST_CONTROL,
+ INTEL_GUCRC_FIRMWARE_CONTROL,
+};
+
+enum intel_guc_preempt_options {
+ INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum intel_guc_report_status {
+ INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+ INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
+ INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
+ INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
+enum intel_guc_sleep_state_status {
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
+#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+
+enum intel_guc_state_capture_event_status {
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1,
+};
+
+#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+
+#endif /* _ABI_GUC_ACTIONS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
new file mode 100644
index 000000000..4c840a263
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _GUC_ACTIONS_SLPC_ABI_H_
+#define _GUC_ACTIONS_SLPC_ABI_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: SLPC SHARED DATA STRUCTURE
+ *
+ * +----+------+--------------------------------------------------------------+
+ * | CL | Bytes| Description |
+ * +====+======+==============================================================+
+ * | 1 | 0-3 | SHARED DATA SIZE |
+ * | +------+--------------------------------------------------------------+
+ * | | 4-7 | GLOBAL STATE |
+ * | +------+--------------------------------------------------------------+
+ * | | 8-11 | DISPLAY DATA ADDRESS |
+ * | +------+--------------------------------------------------------------+
+ * | | 12:63| PADDING |
+ * +----+------+--------------------------------------------------------------+
+ * | 2  | 0:63 | PADDING(PLATFORM INFO)                                       |
+ * +----+------+--------------------------------------------------------------+
+ * | 3 | 0-3 | TASK STATE DATA |
+ * + +------+--------------------------------------------------------------+
+ * | | 4:63 | PADDING |
+ * +----+------+--------------------------------------------------------------+
+ * |4-21|0:1087| OVERRIDE PARAMS AND BIT FIELDS |
+ * +----+------+--------------------------------------------------------------+
+ * | | | PADDING + EXTRA RESERVED PAGE |
+ * +----+------+--------------------------------------------------------------+
+ */
+
+/*
+ * SLPC exposes certain parameters for global configuration by the host.
+ * These are referred to as override parameters, because in most cases
+ * the host will not need to modify the default values used by SLPC.
+ * SLPC remembers the default values, which allows the host to easily restore
+ * them by simply unsetting the override. The host can set or unset override
+ * parameters during SLPC (re-)initialization using the SLPC Reset event.
+ * The host can also set or unset override parameters on the fly using the
+ * Parameter Set and Parameter Unset events.
+ */
+
+#define SLPC_MAX_OVERRIDE_PARAMETERS 256
+#define SLPC_OVERRIDE_BITFIELD_SIZE \
+ (SLPC_MAX_OVERRIDE_PARAMETERS / 32)
+
+#define SLPC_PAGE_SIZE_BYTES 4096
+#define SLPC_CACHELINE_SIZE_BYTES 64
+#define SLPC_SHARED_DATA_SIZE_BYTE_HEADER SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE SLPC_PAGE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_MAX (2 * SLPC_PAGE_SIZE_BYTES)
+
+/*
+ * Cacheline size aligned (Total size needed for
+ * SLPM_KMD_MAX_OVERRIDE_PARAMETERS=256 is 1088 bytes)
+ */
+#define SLPC_OVERRIDE_PARAMS_TOTAL_BYTES (((((SLPC_MAX_OVERRIDE_PARAMETERS * 4) \
+ + ((SLPC_MAX_OVERRIDE_PARAMETERS / 32) * 4)) \
+ + (SLPC_CACHELINE_SIZE_BYTES - 1)) / SLPC_CACHELINE_SIZE_BYTES) * \
+ SLPC_CACHELINE_SIZE_BYTES)
+
+#define SLPC_SHARED_DATA_SIZE_BYTE_OTHER (SLPC_SHARED_DATA_SIZE_BYTE_MAX - \
+ (SLPC_SHARED_DATA_SIZE_BYTE_HEADER \
+ + SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO \
+ + SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE \
+ + SLPC_OVERRIDE_PARAMS_TOTAL_BYTES \
+ + SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE))
+
+enum slpc_task_enable {
+ SLPC_PARAM_TASK_DEFAULT = 0,
+ SLPC_PARAM_TASK_ENABLED,
+ SLPC_PARAM_TASK_DISABLED,
+ SLPC_PARAM_TASK_UNKNOWN
+};
+
+enum slpc_global_state {
+ SLPC_GLOBAL_STATE_NOT_RUNNING = 0,
+ SLPC_GLOBAL_STATE_INITIALIZING = 1,
+ SLPC_GLOBAL_STATE_RESETTING = 2,
+ SLPC_GLOBAL_STATE_RUNNING = 3,
+ SLPC_GLOBAL_STATE_SHUTTING_DOWN = 4,
+ SLPC_GLOBAL_STATE_ERROR = 5
+};
+
+enum slpc_param_id {
+ SLPC_PARAM_TASK_ENABLE_GTPERF = 0,
+ SLPC_PARAM_TASK_DISABLE_GTPERF = 1,
+ SLPC_PARAM_TASK_ENABLE_BALANCER = 2,
+ SLPC_PARAM_TASK_DISABLE_BALANCER = 3,
+ SLPC_PARAM_TASK_ENABLE_DCC = 4,
+ SLPC_PARAM_TASK_DISABLE_DCC = 5,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ = 6,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ = 7,
+ SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ = 8,
+ SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ = 9,
+ SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS = 10,
+ SLPC_PARAM_GLOBAL_DISABLE_GT_FREQ_MANAGEMENT = 11,
+ SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING = 12,
+ SLPC_PARAM_GLOBAL_DISABLE_RC6_MODE_CHANGE = 13,
+ SLPC_PARAM_GLOBAL_OC_UNSLICE_FREQ_MHZ = 14,
+ SLPC_PARAM_GLOBAL_OC_SLICE_FREQ_MHZ = 15,
+ SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING = 16,
+ SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO = 17,
+ SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE = 18,
+ SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE = 19,
+ SLPC_PARAM_GLOBAL_RT_MODE_TURBO_FREQ_DELTA_MHZ = 20,
+ SLPC_PARAM_PWRGATE_RC_MODE = 21,
+ SLPC_PARAM_EDR_MODE_COMPUTE_TIMEOUT_MS = 22,
+ SLPC_PARAM_EDR_QOS_FREQ_MHZ = 23,
+ SLPC_PARAM_MEDIA_FF_RATIO_MODE = 24,
+ SLPC_PARAM_ENABLE_IA_FREQ_LIMITING = 25,
+ SLPC_PARAM_STRATEGIES = 26,
+ SLPC_PARAM_POWER_PROFILE = 27,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY = 28,
+ SLPC_MAX_PARAM = 32,
+};
+
+enum slpc_media_ratio_mode {
+ SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL = 0,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE = 1,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
+};
+
+enum slpc_event_id {
+ SLPC_EVENT_RESET = 0,
+ SLPC_EVENT_SHUTDOWN = 1,
+ SLPC_EVENT_PLATFORM_INFO_CHANGE = 2,
+ SLPC_EVENT_DISPLAY_MODE_CHANGE = 3,
+ SLPC_EVENT_FLIP_COMPLETE = 4,
+ SLPC_EVENT_QUERY_TASK_STATE = 5,
+ SLPC_EVENT_PARAMETER_SET = 6,
+ SLPC_EVENT_PARAMETER_UNSET = 7,
+};
+
+struct slpc_task_state_data {
+ union {
+ u32 task_status_padding;
+ struct {
+ u32 status;
+#define SLPC_GTPERF_TASK_ENABLED REG_BIT(0)
+#define SLPC_DCC_TASK_ENABLED REG_BIT(11)
+#define SLPC_IN_DCC REG_BIT(12)
+#define SLPC_BALANCER_ENABLED REG_BIT(15)
+#define SLPC_IBC_TASK_ENABLED REG_BIT(16)
+#define SLPC_BALANCER_IA_LMT_ENABLED REG_BIT(17)
+#define SLPC_BALANCER_IA_LMT_ACTIVE REG_BIT(18)
+ };
+ };
+ union {
+ u32 freq_padding;
+ struct {
+#define SLPC_MAX_UNSLICE_FREQ_MASK REG_GENMASK(7, 0)
+#define SLPC_MIN_UNSLICE_FREQ_MASK REG_GENMASK(15, 8)
+#define SLPC_MAX_SLICE_FREQ_MASK REG_GENMASK(23, 16)
+#define SLPC_MIN_SLICE_FREQ_MASK REG_GENMASK(31, 24)
+ u32 freq;
+ };
+ };
+} __packed;
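+
+/*
+ * As an illustrative sketch (not part of the ABI), the frequency fields
+ * above can be extracted from the shared data with the usual bitfield
+ * helpers, e.g.:
+ *
+ *	u32 freq = data->task_state_data.freq;
+ *	u32 max_unslice = REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK, freq);
+ *	u32 min_unslice = REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK, freq);
+ *
+ * where "data" is assumed to point at a CPU mapping of the shared data
+ * structure defined below.
+ */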
+
+struct slpc_shared_data_header {
+ /* Total size in bytes of this shared buffer. */
+ u32 size;
+ u32 global_state;
+ u32 display_data_addr;
+} __packed;
+
+struct slpc_override_params {
+ u32 bits[SLPC_OVERRIDE_BITFIELD_SIZE];
+ u32 values[SLPC_MAX_OVERRIDE_PARAMETERS];
+} __packed;
+
+struct slpc_shared_data {
+ struct slpc_shared_data_header header;
+ u8 shared_data_header_pad[SLPC_SHARED_DATA_SIZE_BYTE_HEADER -
+ sizeof(struct slpc_shared_data_header)];
+
+ u8 platform_info_pad[SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO];
+
+ struct slpc_task_state_data task_state_data;
+ u8 task_state_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE -
+ sizeof(struct slpc_task_state_data)];
+
+ struct slpc_override_params override_params;
+ u8 override_params_pad[SLPC_OVERRIDE_PARAMS_TOTAL_BYTES -
+ sizeof(struct slpc_override_params)];
+
+ u8 shared_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_OTHER];
+
+ /* PAGE 2 (4096 bytes), mode based parameter will be removed soon */
+ u8 reserved_mode_definition[4096];
+} __packed;
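+
+/*
+ * Note (illustrative only): the padding above is sized so that the whole
+ * structure spans exactly SLPC_SHARED_DATA_SIZE_BYTE_MAX (two 4K pages),
+ * which a user of this layout could assert at build time, e.g.:
+ *
+ *	BUILD_BUG_ON(sizeof(struct slpc_shared_data) !=
+ *		     SLPC_SHARED_DATA_SIZE_BYTE_MAX);
+ */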
+
+/**
+ * DOC: SLPC H2G MESSAGE FORMAT
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_PC_SLPM_REQUEST` = 0x3003 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:8 | **EVENT_ID** |
+ * + +-------+--------------------------------------------------------------+
+ * | | 7:0 | **EVENT_ARGC** - number of data arguments |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **EVENT_DATA1** |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | ... |
+ * +---+-------+--------------------------------------------------------------+
+ * |2+n| 31:0 | **EVENT_DATAn** |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST 0x3003
+
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN \
+ (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS 9
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MAX_LEN \
+				(HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN + \
+ HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
new file mode 100644
index 000000000..28b8387f9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H
+#define _ABI_GUC_COMMUNICATION_CTB_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: CT Buffer
+ *
+ * Circular buffer used to send `CTB Message`_
+ */
+
+/**
+ * DOC: CTB Descriptor
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:0 | **HEAD** - offset (in dwords) to the last dword that was |
+ * | | | read from the `CT Buffer`_. |
+ * | | | It can only be updated by the receiver. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **TAIL** - offset (in dwords) to the last dword that was |
+ * | | | written to the `CT Buffer`_. |
+ * | | | It can only be updated by the sender. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **STATUS** - status of the CTB |
+ * | | | |
+ * | | | - _`GUC_CTB_STATUS_NO_ERROR` = 0 (normal operation) |
+ * | | | - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large) |
+ * | | | - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message) |
+ * | | | - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified) |
+ * | | | - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use) |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| | RESERVED = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ * | 15| 31:0 | RESERVED = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+struct guc_ct_buffer_desc {
+ u32 head;
+ u32 tail;
+ u32 status;
+#define GUC_CTB_STATUS_NO_ERROR 0
+#define GUC_CTB_STATUS_OVERFLOW BIT(0)
+#define GUC_CTB_STATUS_UNDERFLOW BIT(1)
+#define GUC_CTB_STATUS_MISMATCH BIT(2)
+#define GUC_CTB_STATUS_UNUSED BIT(3)
+ u32 reserved[13];
+} __packed;
+static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
+
+/**
+ * DOC: CTB Message
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **FENCE** - message identifier |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:12 | **FORMAT** - format of the CTB message |
+ * | | | - _`GUC_CTB_FORMAT_HXG` = 0 - see `CTB HXG Message`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 11:8 | **RESERVED** |
+ * | +-------+--------------------------------------------------------------+
+ * | | 7:0 | **NUM_DWORDS** - length of the CTB message (w/o header) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | optional (depends on FORMAT) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HDR_LEN 1u
+#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
+#define GUC_CTB_MSG_MAX_LEN 256u
+#define GUC_CTB_MSG_0_FENCE (0xffffU << 16)
+#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
+#define GUC_CTB_FORMAT_HXG 0u
+#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0)
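+
+/*
+ * A minimal sketch (not part of the ABI): the header dword of a
+ * `CTB Message`_ carrying an HXG payload of "len" dwords, tagged with
+ * identifier "fence", could be packed as:
+ *
+ *	u32 header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
+ *		     FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
+ *		     FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
+ */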
+
+/**
+ * DOC: CTB HXG Message
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | FENCE |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:12 | FORMAT = GUC_CTB_FORMAT_HXG_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 11:8 | RESERVED = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HXG_MSG_MIN_LEN (GUC_CTB_MSG_MIN_LEN + GUC_HXG_MSG_MIN_LEN)
+#define GUC_CTB_HXG_MSG_MAX_LEN GUC_CTB_MSG_MAX_LEN
+
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on u32 data stream written to the shared buffer. One buffer can
+ * be used to transmit data only in one direction (one-directional channel).
+ *
+ * Current status of each buffer is stored in the buffer descriptor.
+ * The buffer descriptor holds tail and head fields that represent the active
+ * data stream. The tail field is updated by the data producer (sender), and
+ * the head field is updated by the data consumer (receiver)::
+ *
+ * +------------+
+ * | DESCRIPTOR | +=================+============+========+
+ * +============+ | | MESSAGE(s) | |
+ * | address |--------->+=================+============+========+
+ * +------------+
+ * | head | ^-----head--------^
+ * +------------+
+ * | tail | ^---------tail-----------------^
+ * +------------+
+ * | size | ^---------------size--------------------^
+ * +------------+
+ *
+ * Each message in data stream starts with the single u32 treated as a header,
+ * followed by optional set of u32 data that makes message specific payload::
+ *
+ * +------------+---------+---------+---------+
+ * | MESSAGE |
+ * +------------+---------+---------+---------+
+ * | msg[0] | [1] | ... | [n-1] |
+ * +------------+---------+---------+---------+
+ * | MESSAGE | MESSAGE PAYLOAD |
+ * + HEADER +---------+---------+---------+
+ * | | 0 | ... | n |
+ * +======+=====+=========+=========+=========+
+ * | 31:16| code| | | |
+ * +------+-----+ | | |
+ * | 15:5|flags| | | |
+ * +------+-----+ | | |
+ * | 4:0| len| | | |
+ * +------+-----+---------+---------+---------+
+ *
+ * ^-------------len-------------^
+ *
+ * The message header consists of:
+ *
+ * - **len**, indicates length of the message payload (in u32)
+ * - **code**, indicates message code
+ * - **flags**, holds various bits to control message handling
+ */
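+
+/*
+ * For illustration only: given the HEAD/TAIL dword offsets from the
+ * `CTB Descriptor`_, the amount of data currently queued in a buffer of
+ * "size" dwords can be computed as below (the actual driver may also
+ * reserve additional space, e.g. to distinguish a full buffer from an
+ * empty one):
+ *
+ *	static u32 ctb_dwords_used(u32 head, u32 tail, u32 size)
+ *	{
+ *		return tail >= head ? tail - head : size - head + tail;
+ *	}
+ *
+ *	free_dwords = size - ctb_dwords_used(head, tail, size);
+ */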
+
+/*
+ * Definition of the command transport message header (DW0)
+ *
+ * bit[4..0] message len (in dwords)
+ * bit[7..5] reserved
+ * bit[8] response (G2H only)
+ * bit[8] write fence to desc (H2G only)
+ * bit[9] write status to H2G buff (H2G only)
+ * bit[10] send status back via G2H (H2G only)
+ * bit[15..11] reserved
+ * bit[31..16] action code
+ */
+#define GUC_CT_MSG_LEN_SHIFT 0
+#define GUC_CT_MSG_LEN_MASK 0x1F
+#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
+#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
+#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
+#define GUC_CT_MSG_SEND_STATUS (1 << 10)
+#define GUC_CT_MSG_ACTION_SHIFT 16
+#define GUC_CT_MSG_ACTION_MASK 0xFFFF
+
+#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
new file mode 100644
index 000000000..9baa3cb07
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+
+/**
+ * DOC: GuC MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC relies on special
+ * hardware registers whose format can be defined by the software
+ * (so called scratch registers).
+ *
+ * Each MMIO based message, both Host to GuC (H2G) and GuC to Host (G2H),
+ * whose maximum length depends on the number of available scratch
+ * registers, is directly written into those scratch registers.
+ *
+ * For Gen9+, there are 16 software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 4 parameters and the GuC firmware
+ * itself uses a 4-element array to store the H2G message.
+ *
+ * For Gen11+, there are 4 additional registers 0x190240-0x19024C, which
+ * are preferred over the legacy ones despite the lower count.
+ *
+ * The MMIO based communication is mainly used during driver initialization
+ * phase to setup the `CTB based communication`_ that will be used afterwards.
+ */
+
+#define GUC_MAX_MMIO_MSG_LEN 4
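+
+/*
+ * Purely illustrative: each dword of an MMIO message lands in a consecutive
+ * scratch register, so for the Gen11+ bank described above, dword n of the
+ * message (n < GUC_MAX_MMIO_MSG_LEN) would be written at MMIO offset:
+ *
+ *	0x190240 + 4 * n
+ */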
+
+/**
+ * DOC: MMIO HXG Message
+ *
+ * Format of the MMIO messages follows definitions of `HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
new file mode 100644
index 000000000..8085fb181
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ERRORS_ABI_H
+#define _ABI_GUC_ERRORS_ABI_H
+
+enum intel_guc_response_status {
+ INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_RESPONSE_NOT_SUPPORTED = 0x20,
+ INTEL_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201,
+ INTEL_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202,
+ INTEL_GUC_RESPONSE_DECRYPTION_FAILED = 0x204,
+ INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+};
+
+enum intel_guc_load_status {
+ INTEL_GUC_LOAD_STATUS_DEFAULT = 0x00,
+ INTEL_GUC_LOAD_STATUS_START = 0x01,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
+ INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
+ INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+ INTEL_GUC_LOAD_STATUS_GDT_DONE = 0x10,
+ INTEL_GUC_LOAD_STATUS_IDT_DONE = 0x20,
+ INTEL_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
+ INTEL_GUC_LOAD_STATUS_GUCINT_DONE = 0x40,
+ INTEL_GUC_LOAD_STATUS_DPC_READY = 0x50,
+ INTEL_GUC_LOAD_STATUS_DPC_ERROR = 0x60,
+ INTEL_GUC_LOAD_STATUS_EXCEPTION = 0x70,
+ INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID = 0x71,
+ INTEL_GUC_LOAD_STATUS_PXP_TEARDOWN_CTRL_ENABLED = 0x72,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
+ INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
+ INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
+
+ INTEL_GUC_LOAD_STATUS_READY = 0xF0,
+};
+
+#endif /* _ABI_GUC_ERRORS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
new file mode 100644
index 000000000..4a59478c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_KLVS_ABI_H
+#define _ABI_GUC_KLVS_ABI_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: GuC KLV
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **KEY** - KLV key identifier |
+ * | | | - `GuC Self Config KLVs`_ |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **LEN** - length of VALUE (in 32bit dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VALUE** - actual value of the KLV (format depends on KEY) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_KLV_LEN_MIN 1u
+#define GUC_KLV_0_KEY (0xffff << 16)
+#define GUC_KLV_0_LEN (0xffff << 0)
+#define GUC_KLV_n_VALUE (0xffffffff << 0)
+
+/**
+ * DOC: GuC Self Config KLVs
+ *
+ * `GuC KLV`_ keys available for use with HOST2GUC_SELF_CFG_.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_ADDR` : 0x0902
+ * Refers to 64 bit Global Gfx address of H2G `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR` : 0x0903
+ * Refers to 64 bit Global Gfx address of H2G `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_SIZE` : 0x0904
+ * Refers to size of H2G `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_ADDR` : 0x0905
+ * Refers to 64 bit Global Gfx address of G2H `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR` : 0x0906
+ * Refers to 64 bit Global Gfx address of G2H `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_SIZE` : 0x0907
+ * Refers to size of G2H `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ */
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY 0x0902
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY 0x0903
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY 0x0904
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN 1u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY 0x0905
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY 0x0906
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
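+
+/*
+ * A rough sketch (not part of the ABI): a 64-bit address KLV such as
+ * GUC_KLV_SELF_CFG_H2G_CTB_ADDR could be appended to a dword stream as:
+ *
+ *	*dw++ = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY) |
+ *		FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN);
+ *	*dw++ = lower_32_bits(addr);
+ *	*dw++ = upper_32_bits(addr);
+ *
+ * where "dw" is assumed to point into the buffer being filled and "addr" is
+ * the 64-bit GGTT address being configured.
+ */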
+
+/*
+ * Per context scheduling policy update keys.
+ */
+enum {
+ GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
+ GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY = 0x2004,
+ GUC_CONTEXT_POLICIES_KLV_ID_SLPM_GT_FREQUENCY = 0x2005,
+
+ GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
+};
+
+#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
new file mode 100644
index 000000000..7d5ba4d97
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_MESSAGES_ABI_H
+#define _ABI_GUC_MESSAGES_ABI_H
+
+/**
+ * DOC: HXG Message
+ *
+ * All messages exchanged with GuC are defined using 32 bit dwords.
+ * First dword is treated as a message header. Remaining dwords are optional.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | | | |
+ * | 0 | 31 | **ORIGIN** - originator of the message |
+ * | | | - _`GUC_HXG_ORIGIN_HOST` = 0 |
+ * | | | - _`GUC_HXG_ORIGIN_GUC` = 1 |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | **TYPE** - message type |
+ * | | | - _`GUC_HXG_TYPE_REQUEST` = 0 |
+ * | | | - _`GUC_HXG_TYPE_EVENT` = 1 |
+ * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 |
+ * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 |
+ * | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 |
+ * | | | - _`GUC_HXG_TYPE_RESPONSE_SUCCESS` = 7 |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **AUX** - auxiliary data (depends on TYPE) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **PAYLOAD** - optional payload (depends on TYPE) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_MSG_MIN_LEN 1u
+#define GUC_HXG_MSG_0_ORIGIN (0x1U << 31)
+#define GUC_HXG_ORIGIN_HOST 0u
+#define GUC_HXG_ORIGIN_GUC 1u
+#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
+#define GUC_HXG_TYPE_REQUEST 0u
+#define GUC_HXG_TYPE_EVENT 1u
+#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
+#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
+#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
+#define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
+#define GUC_HXG_MSG_0_AUX (0xfffffff << 0)
+#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0)
+
+/**
+ * DOC: HXG Request
+ *
+ * The `HXG Request`_ message should be used to initiate synchronous activity
+ * for which confirmation or return data is expected.
+ *
+ * The recipient of this message shall use `HXG Response`_, `HXG Failure`_
+ * or `HXG Retry`_ message as a definite reply, and may use `HXG Busy`_
+ * message as an intermediate reply.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **DATA0** - request data (depends on ACTION) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ACTION** - requested action code |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - optional data (depends on ACTION) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
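+
+/*
+ * For illustration (not a definitive implementation): the header dword of an
+ * `HXG Request`_ originated by the host could be built as:
+ *
+ *	u32 header = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, data0) |
+ *		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action);
+ *
+ * with "action" and "data0" supplied by the definition of the specific
+ * action being requested.
+ */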
+
+/**
+ * DOC: HXG Event
+ *
+ * The `HXG Event`_ message should be used to initiate asynchronous activity
+ * that does not involve immediate confirmation or data.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **DATA0** - event data (depends on ACTION) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ACTION** - event action code |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - optional event data (depends on ACTION) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
+
+/**
+ * DOC: HXG Busy
+ *
+ * The `HXG Busy`_ message may be used to acknowledge reception of the `HXG Request`_
+ * message if the recipient expects that its processing will take longer than
+ * the default timeout.
+ *
+ * The @COUNTER field may be used as a progress indicator.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_BUSY_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNTER** - progress indicator |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_BUSY_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_BUSY_MSG_0_COUNTER GUC_HXG_MSG_0_AUX
+
+/**
+ * DOC: HXG Retry
+ *
+ * The `HXG Retry`_ message should be used by the recipient to indicate that
+ * the `HXG Request`_ message was dropped and should be resent.
+ *
+ * The @REASON field may be used to provide additional information.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_RETRY_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **REASON** - reason for retry |
+ * | | | - _`GUC_HXG_RETRY_REASON_UNSPECIFIED` = 0 |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RETRY_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RETRY_MSG_0_REASON GUC_HXG_MSG_0_AUX
+#define GUC_HXG_RETRY_REASON_UNSPECIFIED 0u
+
+/**
+ * DOC: HXG Failure
+ *
+ * The `HXG Failure`_ message shall be used as a reply to the `HXG Request`_
+ * message that could not be processed due to an error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_FAILURE_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **HINT** - additional error hint |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ERROR** - error/result code |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0)
+
+/**
+ * DOC: HXG Response
+ *
+ * The `HXG Response`_ message shall be used as a reply to the `HXG Request`_
+ * message that was successfully processed without an error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **DATA0** - data (depends on ACTION from `HXG Request`_) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - data (depends on ACTION from `HXG Request`_) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RESPONSE_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RESPONSE_MSG_0_DATA0 GUC_HXG_MSG_0_AUX
+#define GUC_HXG_RESPONSE_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
+
+/* deprecated */
+#define INTEL_GUC_MSG_TYPE_SHIFT 28
+#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT 16
+#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT 0
+#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+enum intel_guc_msg_type {
+ INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+ INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#endif /* _ABI_GUC_MESSAGES_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
new file mode 100644
index 000000000..3624abfd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_FWIF_H
+#define _INTEL_GUC_CAPTURE_FWIF_H
+
+#include <linux/types.h>
+#include "intel_guc_fwif.h"
+
+struct intel_guc;
+struct file;
+
+/**
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+ u32 size;
+ void *data;
+ u32 rd;
+ u32 wr;
+};
+
+/**
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+ /*
+	 * A single set of 3 capture lists: a global list,
+	 * an engine-class list and an engine-instance list.
+	 * These nodes are kept on a linked list (see outlist
+	 * in struct intel_guc_state_capture below) and are
+	 * eventually detached from it and attached to
+	 * i915_gpu_coredump in response to a context reset
+ */
+ struct list_head link;
+ bool is_partial;
+ u32 eng_class;
+ u32 eng_inst;
+ u32 guc_id;
+ u32 lrca;
+ struct gcap_reg_list_info {
+ u32 vfid;
+ u32 num_regs;
+ struct guc_mmio_reg *regs;
+ } reginfo[GUC_CAPTURE_LIST_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_CAPTURE_LIST_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS)
+#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE)
+};
+
+/**
+ * struct guc_debug_capture_list_header / struct guc_debug_capture_list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * GuC microkernel the list of registers we want it to dump out prior
+ * to an engine reset.
+ */
+struct guc_debug_capture_list_header {
+ u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
+} __packed;
+
+struct guc_debug_capture_list {
+ struct guc_debug_capture_list_header header;
+ struct guc_mmio_reg regs[0];
+} __packed;
+
+/**
+ * struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group
+ *
+ * intel_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags, ...) that are used at ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+ i915_reg_t reg;
+ u32 flags;
+ u32 mask;
+ const char *regname;
+};
+
+struct __guc_mmio_reg_descr_group {
+ const struct __guc_mmio_reg_descr *list;
+ u32 num_regs;
+ u32 owner; /* see enum guc_capture_owner */
+ u32 type; /* see enum guc_capture_type */
+ u32 engine; /* as per MAX_ENGINE_CLASS */
+ struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
+};
+
+/**
+ * struct guc_state_capture_header_t / struct guc_state_capture_t /
+ * guc_state_capture_group_header_t / guc_state_capture_group_t
+ *
+ * Prior to resetting engines that have hung or faulted, GuC microkernel
+ * reports the engine error-state (register values that were read) by
+ * logging them into the shared GuC log buffer using these hierarchy
+ * of structures.
+ */
+struct guc_state_capture_header_t {
+ u32 owner;
+#define CAP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_HDR_CAPTURE_TYPE GENMASK(3, 0) /* see enum guc_capture_type */
+#define CAP_HDR_ENGINE_CLASS GENMASK(7, 4) /* see GUC_MAX_ENGINE_CLASSES */
+#define CAP_HDR_ENGINE_INSTANCE GENMASK(11, 8)
+ u32 lrca; /* if type-instance, LRCA (address) that hung, else set to ~0 */
+ u32 guc_id; /* if type-instance, context index of hung context, else set to ~0 */
+ u32 num_mmios;
+#define CAP_HDR_NUM_MMIOS GENMASK(9, 0)
+} __packed;
+
+struct guc_state_capture_t {
+ struct guc_state_capture_header_t header;
+ struct guc_mmio_reg mmio_entries[0];
+} __packed;
+
+enum guc_capture_group_types {
+ GUC_STATE_CAPTURE_GROUP_TYPE_FULL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_MAX,
+};
+
+struct guc_state_capture_group_header_t {
+ u32 owner;
+#define CAP_GRP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_GRP_HDR_NUM_CAPTURES GENMASK(7, 0)
+#define CAP_GRP_HDR_CAPTURE_TYPE GENMASK(15, 8) /* guc_capture_group_types */
+} __packed;
+
+/* this is the top level structure where an error-capture dump starts */
+struct guc_state_capture_group_t {
+ struct guc_state_capture_group_header_t grp_header;
+ struct guc_state_capture_t capture_entries[0];
+} __packed;
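+
+/*
+ * Sketch (not the driver's parser, which lives in intel_guc_capture.c and
+ * reads out of the log buffer): each guc_state_capture_t is variable sized,
+ * a fixed header followed by 'num_mmios' register entries, so stepping to
+ * the next capture within a group means skipping past those entries.
+ * FIELD_GET is assumed to come from <linux/bitfield.h>.
+ */
+static inline struct guc_state_capture_t *
+guc_state_capture_next(struct guc_state_capture_t *capture)
+{
+	u32 num_mmios = FIELD_GET(CAP_HDR_NUM_MMIOS, capture->header.num_mmios);
+
+	return (struct guc_state_capture_t *)&capture->mmio_entries[num_mmios];
+}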
+
+/**
+ * struct __guc_capture_ads_cache
+ *
+ * A structure to cache register lists that were populated and registered
+ * with GuC at startup during ADS registration. This allows much quicker
+ * GuC resets without re-parsing all the tables for the given gt.
+ */
+struct __guc_capture_ads_cache {
+ bool is_valid;
+ void *ptr;
+ size_t size;
+ int status;
+};
+
+/**
+ * struct intel_guc_state_capture
+ *
+ * Internal context of the intel_guc_capture module.
+ */
+struct intel_guc_state_capture {
+ /**
+ * @reglists: static table of register lists used for error-capture state.
+ */
+ const struct __guc_mmio_reg_descr_group *reglists;
+
+ /**
+ * @extlists: allocated table of steered register lists used for error-capture state.
+ *
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+	 * (slices or dual-sub-slices) and thus depend on HW fuses discovered at startup.
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+
+ /**
+	 * @ads_cache: cached register lists that are ADS-format ready
+ */
+ struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+ [GUC_CAPTURE_LIST_TYPE_MAX]
+ [GUC_MAX_ENGINE_CLASSES];
+ void *ads_null_cache;
+
+ /**
+ * @cachelist: Pool of pre-allocated nodes for error capture output
+ *
+	 * We need this pool of pre-allocated nodes because we cannot
+	 * dynamically allocate new nodes when receiving a G2H notification:
+	 * the event handlers for all G2H event-processing are called from the
+	 * CT processing worker queue and, while that queue is being
+	 * processed, there is no absolute guarantee that we are not in the
+	 * midst of a GT reset operation (which doesn't allow allocations).
+ */
+ struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+ int max_mmio_per_node;
+
+ /**
+	 * @outlist: List of parsed error-capture output nodes awaiting report
+	 *
+	 * A linked list of parsed GuC error-capture output data, held until it
+	 * is reported and formatted via i915_gpu_coredump. Each node in this
+	 * list shall contain a single engine-capture including the global,
+	 * engine-class and engine-instance register dumps as per
+	 * guc_capture_parsed_output_node.
+ */
+ struct list_head outlist;
+};
+
+#endif /* _INTEL_GUC_CAPTURE_FWIF_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
new file mode 100644
index 000000000..bac06e3d6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_gt_regs.h"
+#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_slpc.h"
+#include "intel_guc_submission.h"
+#include "i915_drv.h"
+#include "i915_irq.h"
+
+/**
+ * DOC: GuC
+ *
+ * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
+ * designed to offload some of the functionality usually performed by the host
+ * driver; currently the main operations it can take care of are:
+ *
+ * - Authentication of the HuC, which is required to fully enable HuC usage.
+ * - Low latency graphics context scheduling (a.k.a. GuC submission).
+ * - GT Power management.
+ *
+ * The enable_guc module parameter can be used to select which of those
+ * operations to enable within GuC. Note that not all the operations are
+ * supported on all gen9+ platforms.
+ *
+ * Enabling the GuC is not mandatory and therefore the firmware is only loaded
+ * if at least one of the operations is selected. However, not loading the GuC
+ * might result in the loss of some features that do require the GuC (currently
+ * just the HuC, but more are expected to land in the future).
+ */
+
+void intel_guc_notify(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /*
+	 * On Gen11+, the value written to the register is passed as a payload
+ * to the FW. However, the FW currently treats all values the same way
+ * (H2G interrupt), so we can just write the value that the HW expects
+ * on older gens.
+ */
+ intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
+}
+
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
+ GEM_BUG_ON(i >= guc->send_regs.count);
+
+ return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+void intel_guc_init_send_regs(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ enum forcewake_domains fw_domains = 0;
+ unsigned int i;
+
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
+
+ for (i = 0; i < guc->send_regs.count; i++) {
+ fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
+ guc_send_reg(guc, i),
+ FW_REG_READ | FW_REG_WRITE);
+ }
+ guc->send_regs.fw_domains = fw_domains;
+}
+
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(gt->irq_lock);
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(gt->irq_lock);
+
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
+
+ spin_unlock_irq(gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen9_reset_guc_interrupts(guc);
+}
+
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ spin_lock_irq(gt->irq_lock);
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(gt->irq_lock);
+
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen11_reset_guc_interrupts(guc);
+}
+
+void intel_guc_init_early(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
+ intel_guc_ct_init_early(&guc->ct);
+ intel_guc_log_init_early(&guc->log);
+ intel_guc_submission_init_early(guc);
+ intel_guc_slpc_init_early(&guc->slpc);
+ intel_guc_rc_init_early(guc);
+
+ mutex_init(&guc->send_mutex);
+ spin_lock_init(&guc->irq_lock);
+ if (GRAPHICS_VER(i915) >= 11) {
+ guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
+ guc->interrupts.reset = gen11_reset_guc_interrupts;
+ guc->interrupts.enable = gen11_enable_guc_interrupts;
+ guc->interrupts.disable = gen11_disable_guc_interrupts;
+ guc->send_regs.base =
+ i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
+
+ } else {
+ guc->notify_reg = GUC_SEND_INTERRUPT;
+ guc->interrupts.reset = gen9_reset_guc_interrupts;
+ guc->interrupts.enable = gen9_enable_guc_interrupts;
+ guc->interrupts.disable = gen9_disable_guc_interrupts;
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
+ }
+
+ intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+}
+
+void intel_guc_init_late(struct intel_guc *guc)
+{
+ intel_guc_ads_init_late(guc);
+}
+
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
+{
+ u32 level = intel_guc_log_get_level(&guc->log);
+ u32 flags = 0;
+
+ if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
+ flags |= GUC_LOG_DISABLED;
+ else
+ flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
+ GUC_LOG_VERBOSITY_SHIFT;
+
+ return flags;
+}
+
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+ u32 flags = 0;
+
+ if (!intel_guc_submission_is_used(guc))
+ flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+ if (intel_guc_slpc_is_used(guc))
+ flags |= GUC_CTL_ENABLE_SLPC;
+
+ return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+ struct intel_guc_log *log = &guc->log;
+ u32 offset, flags;
+
+ GEM_BUG_ON(!log->sizes_initialised);
+
+ offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
+
+ flags = GUC_LOG_VALID |
+ GUC_LOG_NOTIFY_ON_HALF_FULL |
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
+ log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
+ (log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
+ (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+ return flags;
+}
+
+static u32 guc_ctl_ads_flags(struct intel_guc *guc)
+{
+ u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+
+ return flags;
+}
+
+static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 flags = 0;
+
+ /* Wa_22012773006:gen11,gen12 < XeHP */
+ if (GRAPHICS_VER(gt->i915) >= 11 &&
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ flags |= GUC_WA_POLLCS;
+
+ /* Wa_16011759253:dg2_g10:a0 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ flags |= GUC_WA_GAM_CREDITS;
+
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+
+ /*
+ * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
+ * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
+ *
+ * The same WA bit is used for both and 22011391025 is applicable to
+ * all DG2.
+ */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_DUAL_QUEUE;
+
+ /* Wa_22011802037: graphics version 11/12 */
+ if (IS_GRAPHICS_VER(gt->i915, 11, 12))
+ flags |= GUC_WA_PRE_PARSER;
+
+ /* Wa_16011777198:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
+
+ /*
+ * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
+ * Wa_22012727685:dg2_g11[a0..)
+ */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
+ flags |= GUC_WA_CONTEXT_ISOLATION;
+
+ /* Wa_16015675438 */
+ if (!RCS_MASK(gt))
+ flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
+
+ return flags;
+}
+
+static u32 guc_ctl_devid(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_init_params(struct intel_guc *guc)
+{
+ u32 *params = guc->params;
+ int i;
+
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
+
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+ params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
+ params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_write_params(struct intel_guc *guc)
+{
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
+ int i;
+
+ /*
+	 * All SOFT_SCRATCH registers are in the FORCEWAKE_GT domain and
+	 * they are power-context saved, so it's ok to release forcewake
+ * when we are done here and take it again at xfer time.
+ */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
+
+ intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
+}
+
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ u32 stamp = 0;
+ u64 ktime;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
+ ktime = ktime_get_boottime_ns();
+
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
+ drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
+ drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = intel_uc_fw_init(&guc->fw);
+ if (ret)
+ goto out;
+
+ ret = intel_guc_log_create(&guc->log);
+ if (ret)
+ goto err_fw;
+
+ ret = intel_guc_capture_init(guc);
+ if (ret)
+ goto err_log;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_capture;
+
+ GEM_BUG_ON(!guc->ads_vma);
+
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+
+ if (intel_guc_submission_is_used(guc)) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
+ ret = intel_guc_submission_init(guc);
+ if (ret)
+ goto err_ct;
+ }
+
+ if (intel_guc_slpc_is_used(guc)) {
+ ret = intel_guc_slpc_init(&guc->slpc);
+ if (ret)
+ goto err_submission;
+ }
+
+ /* now that everything is perma-pinned, initialize the parameters */
+ guc_init_params(guc);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(gt->ggtt);
+
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+err_submission:
+ intel_guc_submission_fini(guc);
+err_ct:
+ intel_guc_ct_fini(&guc->ct);
+err_ads:
+ intel_guc_ads_destroy(guc);
+err_capture:
+ intel_guc_capture_destroy(guc);
+err_log:
+ intel_guc_log_destroy(&guc->log);
+err_fw:
+ intel_uc_fw_fini(&guc->fw);
+out:
+ i915_probe_error(gt->i915, "failed with %d\n", ret);
+ return ret;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (!intel_uc_fw_is_loadable(&guc->fw))
+ return;
+
+ i915_ggtt_disable_guc(gt->ggtt);
+
+ if (intel_guc_slpc_is_used(guc))
+ intel_guc_slpc_fini(&guc->slpc);
+
+ if (intel_guc_submission_is_used(guc))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_ct_fini(&guc->ct);
+
+ intel_guc_ads_destroy(guc);
+ intel_guc_capture_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
+ intel_uc_fw_fini(&guc->fw);
+}
+
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
+ u32 *response_buf, u32 response_buf_size)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
+ u32 header;
+ int i;
+ int ret;
+
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len > guc->send_regs.count);
+
+ GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
+ GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);
+
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
+
+retry:
+ for (i = 0; i < len; i++)
+ intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
+
+ intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
+
+ intel_guc_notify(guc);
+
+ /*
+ * No GuC command should ever take longer than 10ms.
+ * Fast commands should still complete in 10us.
+ */
+ ret = __intel_wait_for_register_fw(uncore,
+ guc_send_reg(guc, 0),
+ GUC_HXG_MSG_0_ORIGIN,
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
+ GUC_HXG_ORIGIN_GUC),
+ 10, 10, &header);
+ if (unlikely(ret)) {
+timeout:
+ drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
+ request[0], header);
+ goto out;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
+#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
+ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })
+
+ ret = wait_for(done, 1000);
+ if (unlikely(ret))
+ goto timeout;
+ if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
+ GUC_HXG_ORIGIN_GUC))
+ goto proto;
+#undef done
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
+
+ drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
+ request[0], reason);
+ goto retry;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
+ u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
+ u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
+
+ drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
+ request[0], error, hint);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
+proto:
+ drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
+ request[0], header);
+ ret = -EPROTO;
+ goto out;
+ }
+
+ if (response_buf) {
+ int count = min(response_buf_size, guc->send_regs.count);
+
+ GEM_BUG_ON(!count);
+
+ response_buf[0] = header;
+
+ for (i = 1; i < count; i++)
+ response_buf[i] = intel_uncore_read(uncore,
+ guc_send_reg(guc, i));
+
+ /* Use number of copied dwords as our return value */
+ ret = count;
+ } else {
+ /* Use data from the GuC response as our return value */
+ ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
+ }
+
+out:
+ intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
+ mutex_unlock(&guc->send_mutex);
+
+ return ret;
+}
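+
+/*
+ * A minimal single-dword request as accepted above looks like the sketch
+ * below, with SOME_ACTION standing in for a real GUC_ACTION_* code (see
+ * __guc_action_self_cfg() later in this file for a complete user):
+ *
+ *	u32 request[] = {
+ *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, SOME_ACTION),
+ *	};
+ *
+ *	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ */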
+
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len)
+{
+ u32 msg;
+
+ if (unlikely(!len))
+ return -EPROTO;
+
+ /* Make sure to handle only enabled messages */
+ msg = payload[0] & guc->msg_enabled_mask;
+
+ if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+ if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
+ drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
+
+ return 0;
+}
+
+/**
+ * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
+ * @guc: intel_guc structure
+ * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
+ *
+ * Triggers a HuC firmware authentication request to the GuC via the
+ * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function
+ * is invoked by intel_huc_auth().
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC,
+ rsa_offset
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @guc: the guc
+ */
+int intel_guc_suspend(struct intel_guc *guc)
+{
+ int ret;
+ u32 action[] = {
+ INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
+ };
+
+ if (!intel_guc_is_ready(guc))
+ return 0;
+
+ if (intel_guc_submission_is_used(guc)) {
+ /*
+ * This H2G MMIO command tears down the GuC in two steps. First it will
+ * generate a G2H CTB for every active context indicating a reset. In
+ * practice the i915 shouldn't ever get a G2H as suspend should only be
+ * called when the GPU is idle. Next, it tears down the CTBs and this
+ * H2G MMIO command completes.
+ *
+		 * Don't abort on a failure code from the GuC. Keep going and do the
+		 * clean up in sanitize() and the re-initialisation on resume, and
+		 * hopefully the error here won't be problematic.
+ */
+ ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+ if (ret)
+ DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
+ }
+
+ /* Signal that the GuC isn't running. */
+ intel_guc_sanitize(guc);
+
+ return 0;
+}
+
+/**
+ * intel_guc_resume() - notify GuC resuming from suspend state
+ * @guc: the guc
+ */
+int intel_guc_resume(struct intel_guc *guc)
+{
+ /*
+ * NB: This function can still be called even if GuC submission is
+ * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
+	 * if any code is later added here, it must support doing nothing
+ * if submission is disabled (as per intel_guc_suspend).
+ */
+ return 0;
+}
+
+/**
+ * DOC: GuC Memory Management
+ *
+ * GuC can't allocate any memory for its own usage, so all the allocations must
+ * be handled by the host driver. GuC accesses the memory via the GGTT, with the
+ * exception of the top and bottom parts of the 4GB address space, which are
+ * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
+ * or other parts of the HW. The driver must take care not to place objects that
+ * the GuC is going to access in these reserved ranges. The layout of the GuC
+ * address space is shown below:
+ *
+ * ::
+ *
+ * +===========> +====================+ <== FFFF_FFFF
+ * ^ | Reserved |
+ * | +====================+ <== GUC_GGTT_TOP
+ * | | |
+ * | | DRAM |
+ * GuC | |
+ * Address +===> +====================+ <== GuC ggtt_pin_bias
+ * Space ^ | |
+ * | | | |
+ * | GuC | GuC |
+ * | WOPCM | WOPCM |
+ * | Size | |
+ * | | | |
+ * v v | |
+ * +=======+===> +====================+ <== 0000_0000
+ *
+ * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
+ * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
+ * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
+ */
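+
+/*
+ * In practice this means that, for a GuC-visible object placed at GGTT
+ * offset o with size s, the driver must guarantee (see
+ * intel_guc_allocate_vma() below and intel_guc_ggtt_offset()):
+ *
+ *	i915_ggtt_pin_bias(vma) <= o && o + s <= GUC_GGTT_TOP
+ */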
+
+/**
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ *
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned for its lifetime, so we
+ * allocate both some backing storage and a range inside the Global GTT. We
+ * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
+ * that range is reserved inside GuC.
+ *
+ * Return: A i915_vma if successful, otherwise an ERR_PTR.
+ */
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u64 flags;
+ int ret;
+
+ if (HAS_LMEM(gt->i915))
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CPU_CLEAR |
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_PM_EARLY);
+ else
+ obj = i915_gem_object_create_shmem(gt->i915, size);
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ ret = i915_ggtt_pin(vma, NULL, 0, flags);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ return i915_vma_make_unshrinkable(vma);
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+/**
+ * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ * @out_vma: return variable for the allocated vma pointer
+ * @out_vaddr: return variable for the obj mapping
+ *
+ * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
+ * object with I915_MAP_WB.
+ *
+ * Return: 0 if successful, a negative errno code otherwise.
+ */
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(guc_to_gt(guc)->i915,
+ vma->obj, true));
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ *out_vma = vma;
+ *out_vaddr = vaddr;
+
+ return 0;
+}
+
+static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
+ };
+ int ret;
+
+ GEM_BUG_ON(len > 2);
+ GEM_BUG_ON(len == 1 && upper_32_bits(value));
+
+ /* Self config must go over MMIO */
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret > 1))
+ return -EPROTO;
+ if (unlikely(!ret))
+ return -ENOKEY;
+
+ return 0;
+}
+
+static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int err = __guc_action_self_cfg(guc, key, len, value);
+
+ if (unlikely(err))
+ i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+ ERR_PTR(err), key, value);
+ return err;
+}
+
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
+{
+ return __guc_self_cfg(guc, key, 1, value);
+}
+
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
+{
+ return __guc_self_cfg(guc, key, 2, value);
+}
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_supported(guc)) {
+ drm_printf(p, "GuC not supported\n");
+ return;
+ }
+
+ if (!intel_guc_is_wanted(guc)) {
+ drm_printf(p, "GuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&guc->fw, p);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ u32 status = intel_uncore_read(uncore, GUC_STATUS);
+ u32 i;
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+ }
+ }
+}
+
+void intel_guc_write_barrier(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
+ /*
+ * Ensure intel_uncore_write_fw can be used rather than
+ * intel_uncore_write.
+ */
+ GEM_BUG_ON(guc->send_regs.fw_domains);
+
+ /*
+ * This register is used by the i915 and GuC for MMIO based
+		 * communication. Once we are in this code, CTBs are the only
+		 * method the i915 uses to communicate with the GuC, so it is
+		 * safe to write to this register (a value of 0 is a NOP for MMIO
+		 * communication). If we ever start mixing CTBs and MMIOs, a new
+ * register will have to be chosen. This function is also used
+ * to enforce ordering of a work queue item write and an update
+ * to the process descriptor. When a work queue is being used,
+ * CTBs are also the only mechanism of communication.
+ */
+ intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
+ } else {
+ /* wmb() sufficient for a barrier if in smem */
+ wmb();
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
new file mode 100644
index 000000000..804133df1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include <linux/delay.h>
+#include <linux/iosys-map.h>
+#include <linux/xarray.h>
+
+#include "intel_guc_ct.h"
+#include "intel_guc_fw.h"
+#include "intel_guc_fwif.h"
+#include "intel_guc_log.h"
+#include "intel_guc_reg.h"
+#include "intel_guc_slpc_types.h"
+#include "intel_uc_fw.h"
+#include "intel_uncore.h"
+#include "i915_utils.h"
+#include "i915_vma.h"
+
+struct __guc_ads_blob;
+struct intel_guc_state_capture;
+
+/**
+ * struct intel_guc - Top level structure of GuC.
+ *
+ * It handles firmware loading and manages the client pool. intel_guc owns an
+ * i915_sched_engine for submission.
+ */
+struct intel_guc {
+ /** @fw: the GuC firmware */
+ struct intel_uc_fw fw;
+ /** @log: sub-structure containing GuC log related data and objects */
+ struct intel_guc_log log;
+ /** @ct: the command transport communication channel */
+ struct intel_guc_ct ct;
+ /** @slpc: sub-structure containing SLPC related data and objects */
+ struct intel_guc_slpc slpc;
+ /** @capture: the error-state-capture module's data and objects */
+ struct intel_guc_state_capture *capture;
+
+ /** @sched_engine: Global engine used to submit requests to GuC */
+ struct i915_sched_engine *sched_engine;
+ /**
+ * @stalled_request: if GuC can't process a request for any reason, we
+ * save it until GuC restarts processing. No other request can be
+ * submitted until the stalled request is processed.
+ */
+ struct i915_request *stalled_request;
+ /**
+ * @submission_stall_reason: reason why submission is stalled
+ */
+ enum {
+ STALL_NONE,
+ STALL_REGISTER_CONTEXT,
+ STALL_MOVE_LRC_TAIL,
+ STALL_ADD_REQUEST,
+ } submission_stall_reason;
+
+ /* intel_guc_recv interrupt related state */
+ /** @irq_lock: protects GuC irq state */
+ spinlock_t irq_lock;
+ /**
+ * @msg_enabled_mask: mask of events that are processed when receiving
+ * an INTEL_GUC_ACTION_DEFAULT G2H message.
+ */
+ unsigned int msg_enabled_mask;
+
+ /**
+ * @outstanding_submission_g2h: number of outstanding GuC to Host
+ * responses related to GuC submission, used to determine if the GT is
+ * idle
+ */
+ atomic_t outstanding_submission_g2h;
+
+ /** @interrupts: pointers to GuC interrupt-managing functions. */
+ struct {
+ void (*reset)(struct intel_guc *guc);
+ void (*enable)(struct intel_guc *guc);
+ void (*disable)(struct intel_guc *guc);
+ } interrupts;
+
+ /**
+ * @submission_state: sub-structure for submission state protected by
+ * single lock
+ */
+ struct {
+ /**
+ * @lock: protects everything in submission_state,
+ * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
+ * out of zero
+ */
+ spinlock_t lock;
+ /**
+ * @guc_ids: used to allocate new guc_ids, single-lrc
+ */
+ struct ida guc_ids;
+ /**
+		 * @num_guc_ids: Number of guc_ids; a selftest feature allows
+		 * this number to be reduced while testing.
+ */
+ int num_guc_ids;
+ /**
+ * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ */
+ unsigned long *guc_ids_bitmap;
+ /**
+ * @guc_id_list: list of intel_context with valid guc_ids but no
+ * refs
+ */
+ struct list_head guc_id_list;
+ /**
+ * @destroyed_contexts: list of contexts waiting to be destroyed
+ * (deregistered with the GuC)
+ */
+ struct list_head destroyed_contexts;
+ /**
+		 * @destroyed_worker: worker to deregister contexts; needed
+		 * because we must take a GT PM reference and can't do that
+		 * from the destroy function, as it might be called from an
+		 * atomic context (no sleeping)
+ */
+ struct work_struct destroyed_worker;
+ /**
+ * @reset_fail_worker: worker to trigger a GT reset after an
+ * engine reset fails
+ */
+ struct work_struct reset_fail_worker;
+ /**
+ * @reset_fail_mask: mask of engines that failed to reset
+ */
+ intel_engine_mask_t reset_fail_mask;
+ } submission_state;
+
+ /**
+ * @submission_supported: tracks whether we support GuC submission on
+ * the current platform
+ */
+ bool submission_supported;
+ /** @submission_selected: tracks whether the user enabled GuC submission */
+ bool submission_selected;
+ /** @submission_initialized: tracks whether GuC submission has been initialised */
+ bool submission_initialized;
+ /**
+ * @rc_supported: tracks whether we support GuC rc on the current platform
+ */
+ bool rc_supported;
+ /** @rc_selected: tracks whether the user enabled GuC rc */
+ bool rc_selected;
+
+ /** @ads_vma: object allocated to hold the GuC ADS */
+ struct i915_vma *ads_vma;
+ /** @ads_map: contents of the GuC ADS */
+ struct iosys_map ads_map;
+ /** @ads_regset_size: size of the save/restore regsets in the ADS */
+ u32 ads_regset_size;
+ /**
+ * @ads_regset_count: number of save/restore registers in the ADS for
+ * each engine
+ */
+ u32 ads_regset_count[I915_NUM_ENGINES];
+ /** @ads_regset: save/restore regsets in the ADS */
+ struct guc_mmio_reg *ads_regset;
+ /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
+ u32 ads_golden_ctxt_size;
+ /** @ads_capture_size: size of register lists in the ADS used for error capture */
+ u32 ads_capture_size;
+ /** @ads_engine_usage_size: size of engine usage in the ADS */
+ u32 ads_engine_usage_size;
+
+ /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+ struct i915_vma *lrc_desc_pool_v69;
+ /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+ void *lrc_desc_pool_vaddr_v69;
+
+ /**
+	 * @context_lookup: used to resolve intel_context from guc_id; if a
+ * context is present in this structure it is registered with the GuC
+ */
+ struct xarray context_lookup;
+
+ /** @params: Control params for fw initialization */
+ u32 params[GUC_CTL_MAX_DWORDS];
+
+ /** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
+ struct {
+ u32 base;
+ unsigned int count;
+ enum forcewake_domains fw_domains;
+ } send_regs;
+
+ /** @notify_reg: register used to send interrupts to the GuC FW */
+ i915_reg_t notify_reg;
+
+ /**
+ * @mmio_msg: notification bitmask that the GuC writes in one of its
+ * registers when the CT channel is disabled, to be processed when the
+ * channel is back up.
+ */
+ u32 mmio_msg;
+
+ /** @send_mutex: used to serialize the intel_guc_send actions */
+ struct mutex send_mutex;
+
+ /**
+ * @timestamp: GT timestamp object that stores a copy of the timestamp
+ * and adjusts it for overflow using a worker.
+ */
+ struct {
+ /**
+ * @lock: Lock protecting the below fields and the engine stats.
+ */
+ spinlock_t lock;
+
+ /**
+ * @gt_stamp: 64 bit extended value of the GT timestamp.
+ */
+ u64 gt_stamp;
+
+ /**
+ * @ping_delay: Period for polling the GT timestamp for
+ * overflow.
+ */
+ unsigned long ping_delay;
+
+ /**
+ * @work: Periodic work to adjust GT timestamp, engine and
+ * context usage for overflows.
+ */
+ struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
+
+ /**
+ * @last_stat_jiffies: jiffies at last actual stats collection time
+ * We use this timestamp to ensure we don't oversample the
+ * stats because runtime power management events can trigger
+ * stats collection at much higher rates than required.
+ */
+ unsigned long last_stat_jiffies;
+ } timestamp;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @number_guc_id_stolen: The number of guc_ids that have been stolen
+ */
+ int number_guc_id_stolen;
+#endif
+};
+
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+ return container_of(log, struct intel_guc, log);
+}
+
+static
+inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
+}
+
+static
+inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 g2h_len_dw)
+{
+ return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
+ MAKE_SEND_FLAGS(g2h_len_dw));
+}
+
+static inline int
+intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
+{
+ return intel_guc_ct_send(&guc->ct, action, len,
+ response_buf, response_buf_size, 0);
+}
+
+static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
+ const u32 *action,
+ u32 len,
+ u32 g2h_len_dw,
+ bool loop)
+{
+ int err;
+ unsigned int sleep_period_ms = 1;
+ bool not_atomic = !in_atomic() && !irqs_disabled();
+
+ /*
+	 * FIXME: Have the caller pass in whether we are in an atomic context,
+	 * to avoid using in_atomic(). It is likely safe here as we check for
+	 * irqs being disabled, which basically all the spin locks in the i915
+	 * do, but regardless this should be cleaned up.
+ */
+
+ /* No sleeping with spin locks, just busy loop */
+ might_sleep_if(loop && not_atomic);
+
+retry:
+ err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
+ if (unlikely(err == -EBUSY && loop)) {
+ if (likely(not_atomic)) {
+ if (msleep_interruptible(sleep_period_ms))
+ return -EINTR;
+ sleep_period_ms = sleep_period_ms << 1;
+ } else {
+ cpu_relax();
+ }
+ goto retry;
+ }
+
+ return err;
+}
+
+static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
+{
+ intel_guc_ct_event_handler(&guc->ct);
+}
+
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
+/**
+ * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
+ * @guc: intel_guc structure.
+ * @vma: i915 graphics virtual memory area.
+ *
+ * GuC does not allow any gfx GGTT address that falls into range
+ * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
+ * Currently, in order to exclude [0, ggtt.pin_bias) address space from
+ * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
+ * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
+ *
+ * Return: GGTT offset of the @vma.
+ */
+static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
+ struct i915_vma *vma)
+{
+ u32 offset = i915_ggtt_offset(vma);
+
+ GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
+ GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+
+ return offset;
+}
+
+void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_late(struct intel_guc *guc);
+void intel_guc_init_send_regs(struct intel_guc *guc);
+void intel_guc_write_params(struct intel_guc *guc);
+int intel_guc_init(struct intel_guc *guc);
+void intel_guc_fini(struct intel_guc *guc);
+void intel_guc_notify(struct intel_guc *guc);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len);
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
+int intel_guc_suspend(struct intel_guc *guc);
+int intel_guc_resume(struct intel_guc *guc);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr);
+int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
+int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
+
+static inline bool intel_guc_is_supported(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_supported(&guc->fw);
+}
+
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_enabled(&guc->fw);
+}
+
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&guc->fw);
+}
+
+static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_running(&guc->fw);
+}
+
+static inline bool intel_guc_is_ready(struct intel_guc *guc)
+{
+ return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
+}
+
+static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.reset(guc);
+}
+
+static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc);
+}
+
+static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc);
+}
+
+static inline int intel_guc_sanitize(struct intel_guc *guc)
+{
+ intel_uc_fw_sanitize(&guc->fw);
+ intel_guc_disable_interrupts(guc);
+ intel_guc_ct_sanitize(&guc->ct);
+ guc->mmio_msg = 0;
+
+ return 0;
+}
+
+static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
+{
+ spin_lock_irq(&guc->irq_lock);
+ guc->msg_enabled_mask |= mask;
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
+{
+ spin_lock_irq(&guc->irq_lock);
+ guc->msg_enabled_mask &= ~mask;
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);
+
+int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
+int intel_guc_sched_done_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
+int intel_guc_context_reset_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
+int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len);
+
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);
+
+void intel_guc_find_hung_context(struct intel_engine_cs *engine);
+
+int intel_guc_global_policies_update(struct intel_guc *guc);
+
+void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+void intel_guc_submission_reset_prepare(struct intel_guc *guc);
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
+void intel_guc_submission_reset_finish(struct intel_guc *guc);
+void intel_guc_submission_cancel_requests(struct intel_guc *guc);
+
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
+void intel_guc_write_barrier(struct intel_guc *guc);
+
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
new file mode 100644
index 000000000..74cbe8eaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include <linux/bsearch.h>
+
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_lrc.h"
+#include "gt/shmem_utils.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_fwif.h"
+#include "intel_uc.h"
+#include "i915_drv.h"
+
+/*
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
+ * all the extra buffers indirectly linked via the ADS struct's entries.
+ *
+ * Layout of the ADS blob allocated for the GuC:
+ *
+ * +---------------------------------------+ <== base
+ * | guc_ads |
+ * +---------------------------------------+
+ * | guc_policies |
+ * +---------------------------------------+
+ * | guc_gt_system_info |
+ * +---------------------------------------+
+ * | guc_engine_usage |
+ * +---------------------------------------+ <== static
+ * | guc_mmio_reg[countA] (engine 0.0) |
+ * | guc_mmio_reg[countB] (engine 0.1) |
+ * | guc_mmio_reg[countC] (engine 1.0) |
+ * | ... |
+ * +---------------------------------------+ <== dynamic
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | golden contexts |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | capture lists |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | private data |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ */
+struct __guc_ads_blob {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_gt_system_info system_info;
+ struct guc_engine_usage engine_usage;
+ /* From here on, location is dynamic! Refer to above diagram. */
+ struct guc_mmio_reg regset[];
+} __packed;
+
+#define ads_blob_read(guc_, field_) \
+ iosys_map_rd_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, field_)
+
+#define ads_blob_write(guc_, field_, val_) \
+ iosys_map_wr_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, \
+ field_, val_)
+
+#define info_map_write(map_, field_, val_) \
+ iosys_map_wr_field(map_, 0, struct guc_gt_system_info, field_, val_)
+
+#define info_map_read(map_, field_) \
+ iosys_map_rd_field(map_, 0, struct guc_gt_system_info, field_)
+
+static u32 guc_ads_regset_size(struct intel_guc *guc)
+{
+ GEM_BUG_ON(!guc->ads_regset_size);
+ return guc->ads_regset_size;
+}
+
+static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_golden_ctxt_size);
+}
+
+static u32 guc_ads_capture_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_capture_size);
+}
+
+static u32 guc_ads_private_data_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->fw.private_data_size);
+}
+
+static u32 guc_ads_regset_offset(struct intel_guc *guc)
+{
+ return offsetof(struct __guc_ads_blob, regset);
+}
+
+static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_regset_offset(guc) +
+ guc_ads_regset_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_golden_ctxt_offset(guc) +
+ guc_ads_golden_ctxt_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
+static u32 guc_ads_private_data_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_capture_offset(guc) +
+ guc_ads_capture_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
+static u32 guc_ads_blob_size(struct intel_guc *guc)
+{
+ return guc_ads_private_data_offset(guc) +
+ guc_ads_private_data_size(guc);
+}
+
+static void guc_policies_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
+ u32 global_flags = 0;
+
+ ads_blob_write(guc, policies.dpc_promote_time,
+ GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
+ ads_blob_write(guc, policies.max_num_work_items,
+ GLOBAL_POLICY_MAX_NUM_WI);
+
+ if (i915->params.reset < 2)
+ global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+
+ ads_blob_write(guc, policies.global_flags, global_flags);
+ ads_blob_write(guc, policies.is_valid, 1);
+}
+
+void intel_guc_ads_print_policy_info(struct intel_guc *guc,
+ struct drm_printer *dp)
+{
+ if (unlikely(iosys_map_is_null(&guc->ads_map)))
+ return;
+
+ drm_printf(dp, "Global scheduling policies:\n");
+ drm_printf(dp, " DPC promote time = %u\n",
+ ads_blob_read(guc, policies.dpc_promote_time));
+ drm_printf(dp, " Max num work items = %u\n",
+ ads_blob_read(guc, policies.max_num_work_items));
+ drm_printf(dp, " Flags = %u\n",
+ ads_blob_read(guc, policies.global_flags));
+}
+
+static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
+ policy_offset
+ };
+
+ return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+}
+
+int intel_guc_global_policies_update(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 scheduler_policies;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (iosys_map_is_null(&guc->ads_map))
+ return -EOPNOTSUPP;
+
+ scheduler_policies = ads_blob_read(guc, ads.scheduler_policies);
+ GEM_BUG_ON(!scheduler_policies);
+
+ guc_policies_init(guc);
+
+ if (!intel_guc_is_ready(guc))
+ return 0;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ ret = guc_action_policies_update(guc, scheduler_policies);
+
+ return ret;
+}
+
+static void guc_mapping_table_init(struct intel_gt *gt,
+ struct iosys_map *info_map)
+{
+ unsigned int i, j;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* Table must be set to invalid values for entries not used */
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
+ for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
+ info_map_write(info_map, mapping_table[i][j],
+ GUC_MAX_INSTANCES_PER_CLASS);
+
+ for_each_engine(engine, gt, id) {
+ u8 guc_class = engine_class_to_guc_class(engine->class);
+
+ info_map_write(info_map, mapping_table[guc_class][ilog2(engine->logical_mask)],
+ engine->instance);
+ }
+}
+
+/*
+ * The save/restore register list must be pre-calculated to a temporary
+ * buffer before it can be copied inside the ADS.
+ */
+struct temp_regset {
+ /*
+ * ptr to the section of the storage for the engine currently being
+ * worked on
+ */
+ struct guc_mmio_reg *registers;
+ /* ptr to the base of the allocated storage for all engines */
+ struct guc_mmio_reg *storage;
+ u32 storage_used;
+ u32 storage_max;
+};
+
+static int guc_mmio_reg_cmp(const void *a, const void *b)
+{
+ const struct guc_mmio_reg *ra = a;
+ const struct guc_mmio_reg *rb = b;
+
+ return (int)ra->offset - (int)rb->offset;
+}
+
+static struct guc_mmio_reg * __must_check
+__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
+{
+ u32 pos = regset->storage_used;
+ struct guc_mmio_reg *slot;
+
+ if (pos >= regset->storage_max) {
+ size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
+ struct guc_mmio_reg *r = krealloc(regset->storage,
+ size, GFP_KERNEL);
+ if (!r) {
+ WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
+ -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ regset->registers = r + (regset->registers - regset->storage);
+ regset->storage = r;
+ regset->storage_max = size / sizeof(*slot);
+ }
+
+ slot = &regset->storage[pos];
+ regset->storage_used++;
+ *slot = *reg;
+
+ return slot;
+}
+
+#define GUC_REGSET_STEERING(group, instance) ( \
+ FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
+ FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
+ GUC_REGSET_NEEDS_STEERING \
+)
+
+static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
+ struct temp_regset *regset,
+ i915_reg_t reg, u32 flags)
+{
+ u32 count = regset->storage_used - (regset->registers - regset->storage);
+ u32 offset = i915_mmio_reg_offset(reg);
+ struct guc_mmio_reg entry = {
+ .offset = offset,
+ .flags = flags,
+ };
+ struct guc_mmio_reg *slot;
+ u8 group, inst;
+
+ /*
+ * The mmio list is built using separate lists within the driver.
+ * It's possible that at some point we may attempt to add the same
+ * register more than once. Do not consider this an error; silently
+ * move on if the register is already in the list.
+ */
+ if (bsearch(&entry, regset->registers, count,
+ sizeof(entry), guc_mmio_reg_cmp))
+ return 0;
+
+ /*
+ * The GuC doesn't have a default steering, so we need to explicitly
+ * steer all registers that need steering. However, we do not keep track
+ * of all the steering ranges, only of those that have a chance of using
+ * a non-default steering from the i915 pov. Instead of adding such
+ * tracking, it is easier to just program the default steering for all
+ * regs that don't need a non-default one.
+ */
+ intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
+ entry.flags |= GUC_REGSET_STEERING(group, inst);
+
+ slot = __mmio_reg_add(regset, &entry);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
+
+ while (slot-- > regset->registers) {
+ GEM_BUG_ON(slot[0].offset == slot[1].offset);
+ if (slot[1].offset > slot[0].offset)
+ break;
+
+ swap(slot[1], slot[0]);
+ }
+
+ return 0;
+}
+
+#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
+ guc_mmio_reg_add(gt, \
+ regset, \
+ (reg), \
+ (masked) ? GUC_REGSET_MASKED : 0)
+
+static int guc_mmio_regset_init(struct temp_regset *regset,
+ struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = engine->gt;
+ const u32 base = engine->mmio_base;
+ struct i915_wa_list *wal = &engine->wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
+ int ret = 0;
+
+ /*
+ * Each engine's registers point to a new start relative to
+ * storage
+ */
+ regset->registers = regset->storage + regset->storage_used;
+
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);
+
+ if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
+ CCS_MASK(engine->gt))
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
+
+ /* Be extra paranoid and include all whitelist registers. */
+ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
+ ret |= GUC_MMIO_REG_ADD(gt, regset,
+ RING_FORCE_TO_NONPRIV(base, i),
+ false);
+
+ /* add in local MOCS registers */
+ for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
+
+ return ret ? -1 : 0;
+}
+
+static long guc_mmio_reg_state_create(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct temp_regset temp_set = {};
+ long total = 0;
+ long ret;
+
+ for_each_engine(engine, gt, id) {
+ u32 used = temp_set.storage_used;
+
+ ret = guc_mmio_regset_init(&temp_set, engine);
+ if (ret < 0)
+ goto fail_regset_init;
+
+ guc->ads_regset_count[id] = temp_set.storage_used - used;
+ total += guc->ads_regset_count[id];
+ }
+
+ guc->ads_regset = temp_set.storage;
+
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+ (temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
+
+ return total * sizeof(struct guc_mmio_reg);
+
+fail_regset_init:
+ kfree(temp_set.storage);
+ return ret;
+}
+
+static void guc_mmio_reg_state_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 addr_ggtt, offset;
+
+ offset = guc_ads_regset_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ iosys_map_memcpy_to(&guc->ads_map, offset, guc->ads_regset,
+ guc->ads_regset_size);
+
+ for_each_engine(engine, gt, id) {
+ u32 count = guc->ads_regset_count[id];
+ u8 guc_class;
+
+ /* Class index is checked in class converter */
+ GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
+
+ guc_class = engine_class_to_guc_class(engine->class);
+
+ if (!count) {
+ ads_blob_write(guc,
+ ads.reg_state_list[guc_class][engine->instance].address,
+ 0);
+ ads_blob_write(guc,
+ ads.reg_state_list[guc_class][engine->instance].count,
+ 0);
+ continue;
+ }
+
+ ads_blob_write(guc,
+ ads.reg_state_list[guc_class][engine->instance].address,
+ addr_ggtt);
+ ads_blob_write(guc,
+ ads.reg_state_list[guc_class][engine->instance].count,
+ count);
+
+ addr_ggtt += count * sizeof(struct guc_mmio_reg);
+ }
+}
+
+static void fill_engine_enable_masks(struct intel_gt *gt,
+ struct iosys_map *info_map)
+{
+ info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
+ info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
+ info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
+ info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
+ info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));
+}
+
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+ XEHP_LR_HW_CONTEXT_SIZE : \
+ LR_HW_CONTEXT_SIZE)
+#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
+static int guc_prep_golden_context(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 addr_ggtt, offset;
+ u32 total_size = 0, alloc_size, real_size;
+ u8 engine_class, guc_class;
+ struct guc_gt_system_info local_info;
+ struct iosys_map info_map;
+
+ /*
+ * Reserve the memory for the golden contexts and point GuC at it but
+ * leave it empty for now. The context data will be filled in later
+ * once there is something available to put there.
+ *
+ * Note that the HWSP and ring context are not included.
+ *
+ * Note also that the storage must be pinned in the GGTT, so that the
+ * address won't change after GuC has been told where to find it. The
+ * GuC will also validate that the LRC base + size fall within the
+ * allowed GGTT range.
+ */
+ if (!iosys_map_is_null(&guc->ads_map)) {
+ offset = guc_ads_golden_ctxt_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+ info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
+ offsetof(struct __guc_ads_blob, system_info));
+ } else {
+ memset(&local_info, 0, sizeof(local_info));
+ iosys_map_set_vaddr(&info_map, &local_info);
+ fill_engine_enable_masks(gt, &info_map);
+ }
+
+ for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
+ if (engine_class == OTHER_CLASS)
+ continue;
+
+ guc_class = engine_class_to_guc_class(engine_class);
+
+ if (!info_map_read(&info_map, engine_enabled_masks[guc_class]))
+ continue;
+
+ real_size = intel_engine_context_size(gt, engine_class);
+ alloc_size = PAGE_ALIGN(real_size);
+ total_size += alloc_size;
+
+ if (iosys_map_is_null(&guc->ads_map))
+ continue;
+
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists context. This is required to
+ * allow the GuC to restore just the engine state when a
+ * watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
+ ads_blob_write(guc, ads.eng_state_size[guc_class],
+ real_size - LRC_SKIP_SIZE(gt->i915));
+ ads_blob_write(guc, ads.golden_context_lrca[guc_class],
+ addr_ggtt);
+
+ addr_ggtt += alloc_size;
+ }
+
+ /* Make sure current size matches what we calculated previously */
+ if (guc->ads_golden_ctxt_size)
+ GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
+
+ return total_size;
+}
+
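+/* Return any engine of @engine_class that has recorded default (golden) context state. */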
+static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != engine_class)
+ continue;
+
+ if (!engine->default_state)
+ continue;
+
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void guc_init_golden_context(struct intel_guc *guc)
+{
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt = guc_to_gt(guc);
+ unsigned long offset;
+ u32 addr_ggtt, total_size = 0, alloc_size, real_size;
+ u8 engine_class, guc_class;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+
+ /*
+ * Go back and fill in the golden context data now that it is
+ * available.
+ */
+ offset = guc_ads_golden_ctxt_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
+ if (engine_class == OTHER_CLASS)
+ continue;
+
+ guc_class = engine_class_to_guc_class(engine_class);
+ if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class]))
+ continue;
+
+ real_size = intel_engine_context_size(gt, engine_class);
+ alloc_size = PAGE_ALIGN(real_size);
+ total_size += alloc_size;
+
+ engine = find_engine_state(gt, engine_class);
+ if (!engine) {
+ drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
+ engine_class);
+ ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
+ ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
+ continue;
+ }
+
+ GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
+ real_size - LRC_SKIP_SIZE(gt->i915));
+ GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
+
+ addr_ggtt += alloc_size;
+
+ shmem_read_to_iosys_map(engine->default_state, 0, &guc->ads_map,
+ offset, real_size);
+ offset += alloc_size;
+ }
+
+ GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
+}
+
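+/*
+ * Like guc_prep_golden_context(), this runs in two modes: before the ADS is
+ * mapped it only sizes the error-capture region (a null-descriptor page plus
+ * the global/class/instance register lists), and once the ADS is mapped it
+ * also copies the lists into the blob and records their GGTT addresses.
+ */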
+static int
+guc_capture_prep_lists(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+ struct guc_gt_system_info local_info;
+ struct iosys_map info_map;
+ bool ads_is_mapped;
+ size_t size = 0;
+ void *ptr;
+ int i, j;
+
+ ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
+ if (ads_is_mapped) {
+ capture_offset = guc_ads_capture_offset(guc);
+ ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
+ offsetof(struct __guc_ads_blob, system_info));
+ } else {
+ memset(&local_info, 0, sizeof(local_info));
+ iosys_map_set_vaddr(&info_map, &local_info);
+ fill_engine_enable_masks(gt, &info_map);
+ }
+
+ /* first, set aside the first page for a capture_list with zero descriptors */
+ total_size = PAGE_SIZE;
+ if (ads_is_mapped) {
+ if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ null_ggtt = ads_ggtt + capture_offset;
+ capture_offset += PAGE_SIZE;
+ }
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+
+			/* null the list if we don't have this engine or this list */
+ if (!info_map_read(&info_map, engine_enabled_masks[j])) {
+ if (ads_is_mapped) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ }
+ continue;
+ }
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ goto engine_instance_list;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+engine_instance_list:
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+ }
+ if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
+ &ptr)) {
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+ }
+
+ if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
+ drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
+ guc->ads_capture_size, PAGE_ALIGN(total_size));
+
+ return PAGE_ALIGN(total_size);
+}
+
+static void __guc_ads_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
+ struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
+ offsetof(struct __guc_ads_blob, system_info));
+ u32 base;
+
+ /* GuC scheduling policies */
+ guc_policies_init(guc);
+
+ /* System info */
+ fill_engine_enable_masks(gt, &info_map);
+
+ ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED],
+ hweight8(gt->info.sseu.slice_mask));
+ ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK],
+ gt->info.vdbox_sfc_access);
+
+ if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
+ u32 distdbreg = intel_uncore_read(gt->uncore,
+ GEN12_DIST_DBS_POPULATED);
+ ads_blob_write(guc,
+ system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
+ ((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT)
+ & GEN12_DOORBELLS_PER_SQIDI) + 1);
+ }
+
+ /* Golden contexts for re-initialising after a watchdog reset */
+ guc_prep_golden_context(guc);
+
+ guc_mapping_table_init(guc_to_gt(guc), &info_map);
+
+ base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+
+ /* Lists for error capture debug */
+ guc_capture_prep_lists(guc);
+
+ /* ADS */
+ ads_blob_write(guc, ads.scheduler_policies, base +
+ offsetof(struct __guc_ads_blob, policies));
+ ads_blob_write(guc, ads.gt_system_info, base +
+ offsetof(struct __guc_ads_blob, system_info));
+
+ /* MMIO save/restore list */
+ guc_mmio_reg_state_init(guc);
+
+ /* Private Data */
+ ads_blob_write(guc, ads.private_data, base +
+ guc_ads_private_data_offset(guc));
+
+ i915_gem_object_flush_map(guc->ads_vma->obj);
+}
+
+/**
+ * intel_guc_ads_create() - allocates and initializes GuC ADS.
+ * @guc: intel_guc struct
+ *
+ * GuC needs memory block (Additional Data Struct), where it will store
+ * some data. Allocate and initialize such memory block for GuC use.
+ */
+int intel_guc_ads_create(struct intel_guc *guc)
+{
+ void *ads_blob;
+ u32 size;
+ int ret;
+
+ GEM_BUG_ON(guc->ads_vma);
+
+ /*
+	 * Create the reg state buffer dynamically in system memory; it is
+	 * copied into the final ADS blob on gt init/reset
+ */
+ ret = guc_mmio_reg_state_create(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_regset_size = ret;
+
+ /* Likewise the golden contexts: */
+ ret = guc_prep_golden_context(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_golden_ctxt_size = ret;
+
+ /* Likewise the capture lists: */
+ ret = guc_capture_prep_lists(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_capture_size = ret;
+
+ /* Now the total size can be determined: */
+ size = guc_ads_blob_size(guc);
+
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
+ &ads_blob);
+ if (ret)
+ return ret;
+
+ if (i915_gem_object_is_lmem(guc->ads_vma->obj))
+ iosys_map_set_vaddr_iomem(&guc->ads_map, (void __iomem *)ads_blob);
+ else
+ iosys_map_set_vaddr(&guc->ads_map, ads_blob);
+
+ __guc_ads_init(guc);
+
+ return 0;
+}
+
+void intel_guc_ads_init_late(struct intel_guc *guc)
+{
+ /*
+ * The golden context setup requires the saved engine state from
+ * __engines_record_defaults(). However, that requires engines to be
+ * operational which means the ADS must already have been configured.
+ * Fortunately, the golden context state is not needed until a hang
+ * occurs, so it can be filled in during this late init phase.
+ */
+ guc_init_golden_context(guc);
+}
+
+void intel_guc_ads_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
+ iosys_map_clear(&guc->ads_map);
+ kfree(guc->ads_regset);
+}
+
+static void guc_ads_private_data_reset(struct intel_guc *guc)
+{
+ u32 size;
+
+ size = guc_ads_private_data_size(guc);
+ if (!size)
+ return;
+
+ iosys_map_memset(&guc->ads_map, guc_ads_private_data_offset(guc),
+ 0, size);
+}
+
+/**
+ * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
+ * @guc: intel_guc struct
+ *
+ * GuC stores some data in ADS, which might be stale after a reset.
+ * Reinitialize the whole ADS in case any part of it was corrupted during
+ * the previous GuC run.
+ */
+void intel_guc_ads_reset(struct intel_guc *guc)
+{
+ if (!guc->ads_vma)
+ return;
+
+ __guc_ads_init(guc);
+
+ guc_ads_private_data_reset(guc);
+}
+
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
+{
+ return intel_guc_ggtt_offset(guc, guc->ads_vma) +
+ offsetof(struct __guc_ads_blob, engine_usage);
+}
+
+struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u8 guc_class = engine_class_to_guc_class(engine->class);
+ size_t offset = offsetof(struct __guc_ads_blob,
+ engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
+
+ return IOSYS_MAP_INIT_OFFSET(&guc->ads_map, offset);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
new file mode 100644
index 000000000..1c64f4d6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_ADS_H_
+#define _INTEL_GUC_ADS_H_
+
+#include <linux/types.h>
+#include <linux/iosys-map.h>
+
+struct intel_guc;
+struct drm_printer;
+struct intel_engine_cs;
+
+int intel_guc_ads_create(struct intel_guc *guc);
+void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_init_late(struct intel_guc *guc);
+void intel_guc_ads_reset(struct intel_guc *guc);
+void intel_guc_ads_print_policy_info(struct intel_guc *guc,
+ struct drm_printer *p);
+struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
new file mode 100644
index 000000000..18a8466f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_lrc.h"
+#include "guc_capture_fwif.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_fwif.h"
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "i915_reg.h"
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE: For engine-registers, GuC only needs the register offsets
+ * from the engine-mmio-base
+ */
+#define COMMON_BASE_GLOBAL \
+ { FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
+
+#define COMMON_GEN9BASE_GLOBAL \
+ { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
+ { DONE_REG, 0, 0, "DONE_REG" }, \
+ { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+
+#define GEN9_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
+
+#define COMMON_GEN12BASE_GLOBAL \
+ { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
+ { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
+ { GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \
+ { GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \
+ { GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" }
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+ { RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \
+ { RING_ESR(0), 0, 0, "ESR" }, \
+ { RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
+ { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_IPEIR(0), 0, 0, "IPEIR" }, \
+ { RING_IPEHR(0), 0, 0, "IPEHR" }, \
+ { RING_INSTPS(0), 0, 0, "INSTPS" }, \
+ { RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \
+ { RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \
+ { RING_BBSTATE(0), 0, 0, "BB_STATE" }, \
+ { CCID(0), 0, 0, "CCID" }, \
+ { RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \
+ { RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \
+ { RING_INSTPM(0), 0, 0, "INSTPM" }, \
+ { RING_INSTDONE(0), 0, 0, "INSTDONE" }, \
+ { RING_NOPID(0), 0, 0, "RING_NOPID" }, \
+ { RING_START(0), 0, 0, "START" }, \
+ { RING_HEAD(0), 0, 0, "HEAD" }, \
+ { RING_TAIL(0), 0, 0, "TAIL" }, \
+ { RING_CTL(0), 0, 0, "CTL" }, \
+ { RING_MI_MODE(0), 0, 0, "MODE" }, \
+ { RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \
+ { RING_HWS_PGA(0), 0, 0, "HWS" }, \
+ { RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \
+ { GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
+
+#define COMMON_BASE_HAS_EU \
+ { EIR, 0, 0, "EIR" }
+
+#define COMMON_BASE_RENDER \
+ { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
+
+#define COMMON_GEN12BASE_RENDER \
+ { GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \
+ { GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" }
+
+#define COMMON_GEN12BASE_VEC \
+ { GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \
+ { GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \
+ { GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
+ { GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }
+
+/* XE_LPD - Global */
+static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+ COMMON_GEN12BASE_GLOBAL,
+};
+
+/* XE_LPD - Render / Compute Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+ COMMON_GEN12BASE_RENDER,
+};
+
+/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LPD - Video Enhancement Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = {
+ COMMON_GEN12BASE_VEC,
+};
+
+/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9 - Global */
+static const struct __guc_mmio_reg_descr default_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+ GEN9_GLOBAL,
+};
+
+static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+};
+
+/*
+ * Empty lists:
+ * GEN9/XE_LPD - Blitter Per-Class
+ * GEN9/XE_LPD - Media Decode/Encode Per-Class
+ * GEN9 - VEC Class
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+ { \
+ regslist, \
+ ARRAY_SIZE(regslist), \
+ TO_GCAP_DEF_OWNER(regsowner), \
+ TO_GCAP_DEF_TYPE(regstype), \
+ class, \
+ NULL, \
+ }
+
+/* List of lists */
+static const struct __guc_mmio_reg_descr_group default_lists[] = {
+ MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
+ MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].extlist; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
+{
+ int i = 0;
+
+ if (!reglists)
+ return;
+
+ while (reglists[i].extlist)
+ kfree(reglists[i++].extlist);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ i915_reg_t reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE},
+ {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ int slice_id, int subslice_id)
+{
+ ext->reg = extlist->reg;
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->extlist = list;
+ newlist->num_regs = num_regs;
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ struct sseu_dev_info *sseu;
+
+ /* In XE_LPD we only have steered registers for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_ss_steering(iter, gt, slice, subslice)
+ num_tot_regs += num_steer_regs;
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_ss_steering(iter, gt, slice, subslice) {
+ for (i = 0; i < num_steer_regs; ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+
+ guc->capture->extlists = extlists;
+}
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
+};
+
+static bool __has_xehpg_extregs(u32 ipver)
+{
+ return (ipver >= IP_VER(12, 55));
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists,
+ u32 ipver)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct sseu_dev_info *sseu;
+ int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+
+ /* In XE_LP / HPG we only have render-class steering registers during error-capture */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+ if (__has_xehpg_extregs(ipver))
+ num_steer_regs += ARRAY_SIZE(xehpg_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_ss_steering(iter, gt, slice, subslice)
+ num_tot_regs += num_steer_regs;
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_ss_steering(iter, gt, slice, subslice) {
+ for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ if (__has_xehpg_extregs(ipver)) {
+ for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+ __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+ }
+
+ drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc->capture->extlists = extlists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (GRAPHICS_VER(i915) > 11) {
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+		 * these at init time based on hw config and add them as an extension
+ * list at the end of the pre-populated render list.
+ */
+ if (IS_DG2(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55));
+ else if (IS_XEHPSDV(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50));
+ else
+ guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
+
+ return xe_lpd_lists;
+ }
+
+ /* if GuC submission is enabled on a non-POR platform, just use a common baseline */
+ return default_lists;
+}
+
+static const char *
+__stringify_type(u32 type)
+{
+ switch (type) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ return "Global";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ return "Class";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ return "Instance";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static const char *
+__stringify_engclass(u32 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ return "Render";
+ case GUC_VIDEO_CLASS:
+ return "Video";
+ case GUC_VIDEOENHANCE_CLASS:
+ return "VideoEnhance";
+ case GUC_BLITTER_CLASS:
+ return "Blitter";
+ case GUC_COMPUTE_CLASS:
+ return "Compute";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int
+guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ struct guc_mmio_reg *ptr, u16 num_entries)
+{
+ u32 i = 0, j = 0;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+
+ if (!reglists)
+ return -ENODEV;
+
+ match = guc_capture_get_one_list(reglists, owner, type, classid);
+ if (!match)
+ return -ENODATA;
+
+ for (i = 0; i < num_entries && i < match->num_regs; ++i) {
+ ptr[i].offset = match->list[i].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = match->list[i].flags;
+ ptr[i].mask = match->list[i].mask;
+ }
+
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
+ if (matchext) {
+ for (i = match->num_regs, j = 0; i < num_entries &&
+ i < (match->num_regs + matchext->num_regs) &&
+ j < matchext->num_regs; ++i, ++j) {
+ ptr[i].offset = matchext->extlist[j].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = matchext->extlist[j].flags;
+ ptr[i].mask = matchext->extlist[j].mask;
+ }
+ }
+ if (i < num_entries)
+ drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
+ (int)i, (int)num_entries);
+
+ return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
+{
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int num_regs;
+
+ match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
+ if (!match)
+ return 0;
+
+ num_regs = match->num_regs;
+
+ matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
+ if (matchext)
+ num_regs += matchext->num_regs;
+
+ return num_regs;
+}
+
+static int
+guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size, bool is_purpose_est)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ int num_regs;
+
+ if (!gc->reglists) {
+ drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ return -ENODEV;
+ }
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
+ if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
+ if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ else
+ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ __stringify_type(type), type,
+ __stringify_engclass(classid), classid);
+ return -ENODATA;
+ }
+
+ num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
+	/* intentionally empty lists can exist depending on hw config */
+ if (!num_regs)
+ return -ENODATA;
+
+ if (size)
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+}
+
+int
+intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size)
+{
+ return guc_capture_getlistsize(guc, owner, type, classid, size, false);
+}
+
+static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
+
+int
+intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_debug_capture_list *listnode;
+ int ret, num_regs;
+ u8 *caplist, *tmp;
+ size_t size = 0;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *outptr = cache->ptr;
+ return cache->status;
+ }
+
+ /*
+ * ADS population of input registers is a good
+ * time to pre-allocate cachelist output nodes
+ */
+ guc_capture_create_prealloc_nodes(guc);
+
+ ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
+ if (ret) {
+ cache->is_valid = true;
+ cache->ptr = NULL;
+ cache->size = 0;
+ cache->status = ret;
+ return ret;
+ }
+
+ caplist = kzalloc(size, GFP_KERNEL);
+ if (!caplist) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ return -ENOMEM;
+ }
+
+ /* populate capture list header */
+ tmp = caplist;
+ num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
+ listnode = (struct guc_debug_capture_list *)tmp;
+ listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+	/* populate the list of register descriptors */
+ tmp += sizeof(struct guc_debug_capture_list);
+ guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);
+
+ /* cache this list */
+ cache->is_valid = true;
+ cache->ptr = caplist;
+ cache->size = size;
+ cache->status = 0;
+
+ *outptr = caplist;
+
+ return 0;
+}
+
+int
+intel_guc_capture_getnullheader(struct intel_guc *guc,
+ void **outptr, size_t *size)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tmp = sizeof(u32) * 4;
+ void *null_header;
+
+ if (gc->ads_null_cache) {
+ *outptr = gc->ads_null_cache;
+ *size = tmp;
+ return 0;
+ }
+
+ null_header = kzalloc(tmp, GFP_KERNEL);
+ if (!null_header) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ return -ENOMEM;
+ }
+
+ gc->ads_null_cache = null_header;
+ *outptr = null_header;
+ *size = tmp;
+
+ return 0;
+}
+
+static int
+guc_capture_output_min_size_est(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int worst_min_size = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+ return -ENODEV;
+
+ /*
+	 * If every single engine instance suffered a failure in quick succession but
+	 * the failures were all unrelated, then a burst of multiple error-capture
+	 * events would dump registers for each engine instance, one at a time. In
+	 * this case, GuC would even dump the global registers repeatedly.
+ *
+ * For each engine instance, there would be 1 x guc_state_capture_group_t output
+ * followed by 3 x guc_state_capture_t lists. The latter is how the register
+ * dumps are split across different register types (where the '3' are global vs class
+ * vs instance).
+ */
+ for_each_engine(engine, gt, id) {
+ worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
+ worst_min_size += tmp;
+
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ engine->class, &tmp, true)) {
+ worst_min_size += tmp;
+ }
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ engine->class, &tmp, true)) {
+ worst_min_size += tmp;
+ }
+ }
+
+ return worst_min_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the i915 can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int min_size = guc_capture_output_min_size_est(guc);
+ int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+
+ /*
+ * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB)
+	 * Additionally, it's based on the space needed to fit all engines getting reset at once
+	 * within the same G2H handler task slot. This is very unlikely. However, if GuC really
+	 * does run out of space for whatever reason, we will see a separate warning message
+ * when processing the G2H event capture-notification, search for:
+ * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
+ */
+ if (min_size < 0)
+ drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ min_size);
+ else if (min_size > buffer_size)
+		drm_warn(&i915->drm, "GuC error state capture buffer may be too small: %d < %d\n",
+ buffer_size, min_size);
+ else if (spare_size > buffer_size)
+ drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ buffer_size, spare_size, min_size);
+}
+
+/*
+ * KMD Init time flows:
+ * --------------------
+ * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
+ * intel_guc_ads acquires the register lists by calling
+ *	intel_guc_capture_getlistsize and intel_guc_capture_getlist 'n' times,
+ * where n = 1 for global-reg-list +
+ * num_engine_classes for class-reg-list +
+ * num_engine_classes for instance-reg-list
+ * (since all instances of the same engine-class type
+ * have an identical engine-instance register-list).
+ * ADS module also calls separately for PF vs VF.
+ *
+ * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
+ *                      Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
+ *                      Note: 'x 3' to hold multiple capture groups
+ *
+ * GUC Runtime notify capture:
+ * --------------------------
+ * --> G2H STATE_CAPTURE_NOTIFICATION
+ * L--> intel_guc_capture_process
+ * L--> Loop through B (head..tail) and for each engine instance's
+ * err-state-captured register-list we find, we alloc 'C':
+ * --> alloc C: A capture-output-node structure that includes misc capture info along
+ * with 3 register list dumps (global, engine-class and engine-instance)
+ * This node is created from a pre-allocated list of blank nodes in
+ * guc->capture->cachelist and populated with the error-capture
+ * data from GuC and then it's added into guc->capture->outlist linked
+ * list. This list is used for matchup and printout by i915_gpu_coredump
+ * and err_print_gt, (when user invokes the error capture sysfs).
+ *
+ * GUC --> notify context reset:
+ * -----------------------------
+ * --> G2H CONTEXT RESET
+ * L--> guc_handle_context_reset --> i915_capture_error_state
+ * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
+ * --> capture_engine(..IS_GUC_CAPTURE)
+ *                        L--> intel_guc_capture_get_matching_node is where we
+ *                             detach C from the internal linked list and add it to the
+ *                             intel_engine_coredump struct (if the context and engine
+ *                             of the event notification match a node in the linked
+ *                             list).
+ *
+ * User Sysfs / Debugfs
+ * --------------------
+ * --> i915_gpu_coredump_copy_to_buffer->
+ * L--> err_print_to_sgl --> err_print_gt
+ * L--> error_print_guc_captures
+ * L--> intel_guc_capture_print_node prints the
+ *                             register list values of the attached node
+ * on the error-engine-dump being reported.
+ * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
+ * L--> ... cleanup_gt -->
+ * L--> intel_guc_capture_free_node returns the
+ * capture-output-node back to the internal
+ * cachelist for reuse.
+ *
+ */
+
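+/*
+ * Byte counts for the error-capture ring buffer: the total number of unread
+ * bytes (accounting for wraparound) and the contiguous run up to the end of
+ * the ring, respectively.
+ */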
+static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
+{
+ if (buf->wr >= buf->rd)
+ return (buf->wr - buf->rd);
+ return (buf->size - buf->rd) + buf->wr;
+}
+
+static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
+{
+ if (buf->rd > buf->wr)
+ return (buf->size - buf->rd);
+ return (buf->wr - buf->rd);
+}
+
+/*
+ * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
+ *
+ * The GuC Log buffer region for error-capture is managed like a ring buffer.
+ * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
+ * Additionally, for the current and foreseeable future, all packed error-
+ * capture output structures are dword aligned.
+ *
+ * That said, if the GuC firmware is in the midst of writing a structure that is
+ * larger than one dword but the tail end of the err-capture buffer region has
+ * less space left, we need to extract that structure one dword at a time,
+ * straddling the end of the ring onto its start.
+ *
+ * The function below, guc_capture_log_remove_dw, is a helper for that. Callers
+ * typically do a straight-up memcpy from the ring contents and only fall back
+ * to this helper when their structure extraction straddles the end of the
+ * ring. GuC firmware does not add any padding; skipping padding eases future
+ * expansion of output data types without requiring a redesign of the flow
+ * controls.
+ */
+static int
+guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ u32 *dw)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tries = 2;
+ int avail = 0;
+ u32 *src_data;
+
+ if (!guc_capture_buf_cnt(buf))
+ return 0;
+
+ while (tries--) {
+ avail = guc_capture_buf_cnt_to_end(buf);
+ if (avail >= sizeof(u32)) {
+ src_data = (u32 *)(buf->data + buf->rd);
+ *dw = *src_data;
+ buf->rd += 4;
+ return 4;
+ }
+ if (avail)
+ drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ buf->rd = 0;
+ }
+
+ return 0;
+}
+
+static bool
+guc_capture_data_extracted(struct __guc_capture_bufstate *b,
+ int size, void *dest)
+{
+ if (guc_capture_buf_cnt_to_end(b) >= size) {
+ memcpy(dest, (b->data + b->rd), size);
+ b->rd += size;
+ return true;
+ }
+ return false;
+}
+
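+/*
+ * The extraction helpers below each check that a full structure is buffered,
+ * try a straight copy when the structure does not wrap, and otherwise fall
+ * back to pulling it out one dword at a time across the ring wrap.
+ */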
+static int
+guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_group_header_t *ghdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_group_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_header_t *hdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_mmio_reg *reg)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_mmio_reg);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->value);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static void
+guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ kfree(node->reginfo[i].regs);
+ list_del(&node->link);
+ kfree(node);
+}
+
+static void
+guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /*
+ * NOTE: At the end of driver operation, we must assume that we
+	 * have prealloc nodes in both the cachelist and the outlist
+ * if unclaimed error capture events occurred prior to shutdown.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
+ guc_capture_delete_one_node(guc, n);
+
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
+ guc_capture_delete_one_node(guc, n);
+}
+
+static void
+guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
+ struct list_head *list)
+{
+ list_add_tail(&node->link, list);
+}
+
+static void
+guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->outlist);
+}
+
+static void
+guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->cachelist);
+}
+
+static void
+guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
+ guc->capture->max_mmio_per_node);
+ }
+ memset(node, 0, sizeof(*node));
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ node->reginfo[i].regs = tmp[i];
+
+ INIT_LIST_HEAD(&node->link);
+}
+
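+/*
+ * Grab a blank node from the cachelist or, if it is empty, steal back and
+ * reinitialise a node that was already handed to the outlist, so extraction
+ * can continue without allocating memory in the G2H handling path.
+ */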
+static struct __guc_capture_parsed_output *
+guc_capture_get_prealloc_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *found = NULL;
+
+ if (!list_empty(&guc->capture->cachelist)) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+		/* get the first available node from the cache list */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
+ found = n;
+ list_del(&n->link);
+ break;
+ }
+ } else {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* traverse down and steal back the oldest node already allocated */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ found = n;
+ }
+ if (found)
+ list_del(&found->link);
+ }
+ if (found)
+ guc_capture_init_node(guc, found);
+
+ return found;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
+ sizeof(struct guc_mmio_reg), GFP_KERNEL);
+ if (!new->reginfo[i].regs) {
+ while (i)
+ kfree(new->reginfo[--i].regs);
+ kfree(new);
+ return NULL;
+ }
+ }
+ guc_capture_init_node(guc, new);
+
+ return new;
+}
+
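+/*
+ * Take a fresh prealloc node and copy across only the register lists selected
+ * by @keep_reglist_mask (plus the matching class/instance identifiers), e.g.
+ * so dependent engine resets can share the same global or class dumps.
+ */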
+static struct __guc_capture_parsed_output *
+guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
+ u32 keep_reglist_mask)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return NULL;
+ if (!original)
+ return new;
+
+ new->is_partial = original->is_partial;
+
+ /* copy reg-lists that we want to clone */
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (keep_reglist_mask & BIT(i)) {
+ GEM_BUG_ON(original->reginfo[i].num_regs >
+ guc->capture->max_mmio_per_node);
+
+ memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
+ original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
+
+ new->reginfo[i].num_regs = original->reginfo[i].num_regs;
+ new->reginfo[i].vfid = original->reginfo[i].vfid;
+
+ if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
+ new->eng_class = original->eng_class;
+ } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ new->eng_inst = original->eng_inst;
+ new->guc_id = original->guc_id;
+ new->lrca = original->lrca;
+ }
+ }
+ }
+
+ return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *node = NULL;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int i;
+
+ for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+ node = guc_capture_alloc_one_node(guc);
+ if (!node) {
+ drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+			/* don't free the prior allocations; use what we got and clean up at shutdown */
+ return;
+ }
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+}
+
+static int
+guc_get_max_reglist_count(struct intel_guc *guc)
+{
+ int i, j, k, tmp, maxregcount = 0;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
+ continue;
+
+ tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
+ if (tmp > maxregcount)
+ maxregcount = tmp;
+ }
+ }
+ }
+ if (!maxregcount)
+ maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+ return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ /* skip if we've already done the pre-alloc */
+ if (guc->capture->max_mmio_per_node)
+ return;
+
+ guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+ __guc_capture_create_prealloc_nodes(guc);
+}
+
+static int
+guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_state_capture_group_header_t ghdr = {0};
+ struct guc_state_capture_header_t hdr = {0};
+ struct __guc_capture_parsed_output *node = NULL;
+ struct guc_mmio_reg *regs = NULL;
+ int i, numlists, numregs, ret = 0;
+ enum guc_capture_type datatype;
+ struct guc_mmio_reg tmp;
+ bool is_partial = false;
+
+ i = guc_capture_buf_cnt(buf);
+ if (!i)
+ return -ENODATA;
+ if (i % sizeof(u32)) {
+ drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ ret = -EIO;
+ goto bailout;
+ }
+
+ /* first get the capture group header */
+ if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
+ ret = -EIO;
+ goto bailout;
+ }
+ /*
+	 * We would typically expect a layout as below, with at least three captures
+	 * (global, class, instance) and more if multiple dependent engine instances
+	 * are being reset together.
+ * ____________________________________________
+ * | Capture Group |
+ * | ________________________________________ |
+ * | | Capture Group Header: | |
+ * | | - num_captures = 5 | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture1: | |
+ * | | Hdr: GLOBAL, numregs=a | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rega | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture2: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regb | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture3: | |
+ * | | Hdr: INSTANCE=RCS, numregs=c | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regc | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture4: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regd | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture5: | |
+ * | | Hdr: INSTANCE=CCS0, numregs=e | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rege | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * |__________________________________________|
+ */
+ is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
+ numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
+
+ while (numlists--) {
+ if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
+ ret = -EIO;
+ break;
+ }
+
+ datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
+ if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ /* unknown capture type - skip over to next capture set */
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &tmp)) {
+ ret = -EIO;
+ break;
+ }
+ }
+ continue;
+ } else if (node) {
+ /*
+			 * Based on the current capture type and what we have so far,
+			 * decide whether to add the current node to the internal
+			 * linked list for match-up when i915_gpu_coredump calls later
+			 * (allocating a blank node for the next set of reglists),
+			 * continue with the same node, or clone the current node
+			 * while retaining only the global or class registers (as in
+			 * the case of dependent engine resets).
+ */
+ if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
+ /* Add to list, clone node and duplicate global list */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ GCAP_PARSED_REGLIST_INDEX_GLOBAL);
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
+ /* Add to list, clone node and duplicate global + class lists */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
+ GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
+ }
+ }
+
+ if (!node) {
+ node = guc_capture_get_prealloc_node(guc);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ datatype);
+ }
+ node->is_partial = is_partial;
+ node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
+ switch (datatype) {
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
+ node->lrca = hdr.lrca;
+ node->guc_id = hdr.guc_id;
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ break;
+ default:
+ break;
+ }
+
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ if (numregs > guc->capture->max_mmio_per_node) {
+ drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ numregs = guc->capture->max_mmio_per_node;
+ }
+ node->reginfo[datatype].num_regs = numregs;
+ regs = node->reginfo[datatype].regs;
+ i = 0;
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+bailout:
+ if (node) {
+ /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (node->reginfo[i].regs) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ break;
+ }
+ }
+ if (node) /* else return it back to cache list */
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+ return ret;
+}
+
+static int __guc_capture_flushlog_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_CAPTURE_LOG_BUFFER
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
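+/*
+ * Snapshot the capture section's guc_log_buffer_state (read/write pointers
+ * and overflow count), parse every complete capture group into output nodes
+ * unless a reset is in progress, then advance the read pointer and send the
+ * flush-complete ack back to GuC.
+ */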
+static void __guc_capture_process_output(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, full_count;
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_log_buffer_state log_buf_state_local;
+ struct guc_log_buffer_state *log_buf_state;
+ struct __guc_capture_bufstate buf;
+ void *src_data = NULL;
+ bool new_overflow;
+ int ret;
+
+ log_buf_state = guc->log.buf_addr +
+ (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr +
+ intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
+
+ /*
+	 * Make a stack copy of the state structure, which lives inside the
+	 * GuC log buffer (an uncached mapping), to avoid reading from it
+	 * multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
+ buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_count = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
+ new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
+ full_count);
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ buf.size = buffer_size;
+ buf.rd = read_offset;
+ buf.wr = write_offset;
+ buf.data = src_data;
+
+ if (!uc->reset_in_progress) {
+ do {
+ ret = guc_capture_extract_reglists(guc, &buf);
+ } while (ret >= 0);
+ }
+
+ /* Update the state of log buffer err-cap state */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ __guc_capture_flushlog_complete(guc);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+static const char *
+guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
+ u32 class, u32 id, u32 offset, u32 *is_ext)
+{
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int j;
+
+ *is_ext = 0;
+ if (!reglists)
+ return NULL;
+
+ match = guc_capture_get_one_list(reglists, owner, type, id);
+ if (!match)
+ return NULL;
+
+ for (j = 0; j < match->num_regs; ++j) {
+ if (offset == match->list[j].reg.reg)
+ return match->list[j].regname;
+ }
+ if (extlists) {
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
+ if (!matchext)
+ return NULL;
+ for (j = 0; j < matchext->num_regs; ++j) {
+ if (offset == matchext->extlist[j].reg.reg) {
+ *is_ext = 1;
+ return matchext->extlist[j].regname;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
+ do { \
+ i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
+ } while (0)
+
+#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
+ do { \
+ i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ } while (0)
+
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ const struct intel_engine_coredump *ee)
+{
+ const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+ "full-capture",
+ "partial-capture"
+ };
+ const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
+ "Global",
+ "Engine-Class",
+ "Engine-Instance"
+ };
+ struct intel_guc_state_capture *cap;
+ struct __guc_capture_parsed_output *node;
+ struct intel_engine_cs *eng;
+ struct guc_mmio_reg *regs;
+ struct intel_guc *guc;
+ const char *str;
+ int numregs, i, j;
+ u32 is_ext;
+
+ if (!ebuf || !ee)
+ return -EINVAL;
+ cap = ee->guc_capture;
+ if (!cap || !ee->engine)
+ return -ENODEV;
+
+ guc = &ee->engine->gt->uc.guc;
+
+ i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
+
+ node = ee->guc_capture_node;
+ if (!node) {
+ i915_error_printf(ebuf, " No matching ee-node\n");
+ return 0;
+ }
+
+ i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ i915_error_printf(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+
+ switch (i) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ default:
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ i915_error_printf(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
+ if (eng)
+ GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
+ else
+ i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n");
+ GCAP_PRINT_GUC_INST_INFO(ebuf, node);
+ break;
+ }
+
+ numregs = node->reginfo[i].num_regs;
+ i915_error_printf(ebuf, " NumRegs: %d\n", numregs);
+ j = 0;
+ while (numregs--) {
+ regs = node->reginfo[i].regs;
+ str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
+ node->eng_class, 0, regs[j].offset, &is_ext);
+ if (!str)
+ i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset);
+ else
+ i915_error_printf(ebuf, " %s", str);
+ if (is_ext)
+ i915_error_printf(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value);
+ ++j;
+ }
+ }
+ return 0;
+}
+
+#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
+{
+ if (!ee || !ee->guc_capture_node)
+ return;
+
+ guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
+ ee->guc_capture = NULL;
+ ee->guc_capture_node = NULL;
+}
+
+void intel_guc_capture_get_matching_node(struct intel_gt *gt,
+ struct intel_engine_coredump *ee,
+ struct intel_context *ce)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+ struct drm_i915_private *i915;
+ struct intel_guc *guc;
+
+ if (!gt || !ee || !ce)
+ return;
+
+ i915 = gt->i915;
+ guc = &gt->uc.guc;
+ if (!guc->capture)
+ return;
+
+ GEM_BUG_ON(ee->guc_capture_node);
+ /*
+ * Look for a matching GuC-reported error capture node in the
+ * internal output link-list, based on lrca, guc-id and engine
+ * identification.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
+ n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
+ n->guc_id && n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
+ (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ list_del(&n->link);
+ ee->guc_capture_node = n;
+ ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
+ return;
+ }
+ }
+ drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+}
+
+void intel_guc_capture_process(struct intel_guc *guc)
+{
+ if (guc->capture)
+ __guc_capture_process_output(guc);
+}
+
+static void
+guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
+{
+ int i, j, k;
+ struct __guc_capture_ads_cache *cache;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ cache = &gc->ads_cache[i][j][k];
+ if (cache->is_valid)
+ kfree(cache->ptr);
+ }
+ }
+ }
+ kfree(gc->ads_null_cache);
+}
+
+void intel_guc_capture_destroy(struct intel_guc *guc)
+{
+ if (!guc->capture)
+ return;
+
+ guc_capture_free_ads_cache(guc->capture);
+
+ guc_capture_delete_prealloc_nodes(guc);
+
+ guc_capture_free_extlists(guc->capture->extlists);
+ kfree(guc->capture->extlists);
+
+ kfree(guc->capture);
+ guc->capture = NULL;
+}
+
+int intel_guc_capture_init(struct intel_guc *guc)
+{
+ guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
+ if (!guc->capture)
+ return -ENOMEM;
+
+ guc->capture->reglists = guc_capture_get_device_reglist(guc);
+
+ INIT_LIST_HEAD(&guc->capture->outlist);
+ INIT_LIST_HEAD(&guc->capture->cachelist);
+
+ check_guc_capture_size(guc);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
new file mode 100644
index 000000000..fbd3713c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_H
+#define _INTEL_GUC_CAPTURE_H
+
+#include <linux/types.h>
+
+struct drm_i915_error_state_buf;
+struct guc_gt_system_info;
+struct intel_engine_coredump;
+struct intel_context;
+struct intel_gt;
+struct intel_guc;
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee);
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_coredump *ee);
+void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
+ struct intel_context *ce);
+void intel_guc_capture_process(struct intel_guc *guc);
+int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr);
+int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size);
+int intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size);
+void intel_guc_capture_destroy(struct intel_guc *guc);
+int intel_guc_capture_init(struct intel_guc *guc);
+
+#endif /* _INTEL_GUC_CAPTURE_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
new file mode 100644
index 000000000..2b22065e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -0,0 +1,1250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ktime.h>
+#include <linux/time64.h>
+#include <linux/string_helpers.h>
+#include <linux/timekeeping.h>
+
+#include "i915_drv.h"
+#include "intel_guc_ct.h"
+#include "gt/intel_gt.h"
+
+static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
+{
+ return container_of(ct, struct intel_guc, ct);
+}
+
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+ return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+ return ct_to_gt(ct)->i915;
+}
+
+static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
+{
+ return &ct_to_i915(ct)->drm;
+}
+
+#define CT_ERROR(_ct, _fmt, ...) \
+ drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CT_DEBUG(_ct, _fmt, ...) \
+ drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+#else
+#define CT_DEBUG(...) do { } while (0)
+#endif
+#define CT_PROBE_ERROR(_ct, _fmt, ...) \
+ i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)
+
+/**
+ * DOC: CTB Blob
+ *
+ * We allocate single blob to hold both CTB descriptors and buffers:
+ *
+ * +--------+-----------------------------------------------+------+
+ * | offset | contents | size |
+ * +========+===============================================+======+
+ * | 0x0000 | H2G `CTB Descriptor`_ (send) | |
+ * +--------+-----------------------------------------------+ 4K |
+ * | 0x0800 | G2H `CTB Descriptor`_ (recv) | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K |
+ * | | | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K |
+ * | + n*4K | | |
+ * +--------+-----------------------------------------------+------+
+ *
+ * Size of each `CT Buffer`_ must be a multiple of 4K.
+ * We don't expect too many messages in flight at any time, unless we are
+ * using GuC submission. In that case each request requires a minimum of
+ * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
+ * is enough space to avoid backpressure on the driver. We increase the size
+ * of the receive buffer (relative to the send) to ensure a G2H response
+ * CTB has a landing spot.
+ */
+#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_SIZE (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
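+
+/*
+ * Worked example of the layout above, derived purely from the defines here
+ * (informal; the DOC table remains the authoritative description): the
+ * descriptor struct is padded to 2K, so both descriptors fit in the first
+ * 4K page; the H2G buffer is a single 4K page at offset 0x1000; the G2H
+ * buffer is 4 * 4K = 16K at offset 0x2000, of which 4K is held back as
+ * reserved room for unsolicited G2H messages. The whole blob is therefore
+ * 2 * 2K + 4K + 16K = 24K.
+ */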
+
+struct ct_request {
+ struct list_head link;
+ u32 fence;
+ u32 status;
+ u32 response_len;
+ u32 *response_buf;
+};
+
+struct ct_incoming_msg {
+ struct list_head link;
+ u32 size;
+ u32 msg[];
+};
+
+enum { CTB_SEND = 0, CTB_RECV = 1 };
+
+enum { CTB_OWNER_HOST = 0 };
+
+static void ct_receive_tasklet_func(struct tasklet_struct *t);
+static void ct_incoming_request_worker_func(struct work_struct *w);
+
+/**
+ * intel_guc_ct_init_early - Initialize CT state without requiring device access
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_init_early(struct intel_guc_ct *ct)
+{
+ spin_lock_init(&ct->ctbs.send.lock);
+ spin_lock_init(&ct->ctbs.recv.lock);
+ spin_lock_init(&ct->requests.lock);
+ INIT_LIST_HEAD(&ct->requests.pending);
+ INIT_LIST_HEAD(&ct->requests.incoming);
+ INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
+ tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
+ init_waitqueue_head(&ct->wq);
+}
+
+static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
+{
+ memset(desc, 0, sizeof(*desc));
+}
+
+static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
+{
+ u32 space;
+
+ ctb->broken = false;
+ ctb->tail = 0;
+ ctb->head = 0;
+ space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
+ atomic_set(&ctb->space, space);
+
+ guc_ct_buffer_desc_init(ctb->desc);
+}
+
+static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
+ struct guc_ct_buffer_desc *desc,
+ u32 *cmds, u32 size_in_bytes, u32 resv_space)
+{
+ GEM_BUG_ON(size_in_bytes % 4);
+
+ ctb->desc = desc;
+ ctb->cmds = cmds;
+ ctb->size = size_in_bytes / 4;
+ ctb->resv_space = resv_space / 4;
+
+ guc_ct_buffer_reset(ctb);
+}
+
+static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
+{
+ u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
+ FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
+ };
+ int ret;
+
+ GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
+
+ /* CT control must go over MMIO */
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
+{
+ int err;
+
+ err = guc_action_control_ctb(ct_to_guc(ct), enable ?
+ GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
+ if (unlikely(err))
+ CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(err));
+
+ return err;
+}
+
+static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
+ u32 desc_addr, u32 buff_addr, u32 size)
+{
+ int err;
+
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (unlikely(err))
+ goto failed;
+
+ err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+ buff_addr);
+ if (unlikely(err))
+ goto failed;
+
+ err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
+ GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
+ GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+ size);
+ if (unlikely(err))
+failed:
+ CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
+ send ? "SEND" : "RECV", ERR_PTR(err));
+
+ return err;
+}
+
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct guc_ct_buffer_desc *desc;
+ u32 blob_size;
+ u32 cmds_size;
+ u32 resv_space;
+ void *blob;
+ u32 *cmds;
+ int err;
+
+ err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(ct->vma);
+
+ blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
+ err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
+ if (unlikely(err)) {
+ CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
+ blob_size, ERR_PTR(err));
+ return err;
+ }
+
+ CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
+
+ /* store pointers to desc and cmds for send ctb */
+ desc = blob;
+ cmds = blob + 2 * CTB_DESC_SIZE;
+ cmds_size = CTB_H2G_BUFFER_SIZE;
+ resv_space = 0;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+ resv_space);
+
+ guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
+
+ /* store pointers to desc and cmds for recv ctb */
+ desc = blob + CTB_DESC_SIZE;
+ cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
+ cmds_size = CTB_G2H_BUFFER_SIZE;
+ resv_space = G2H_ROOM_BUFFER_SIZE;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+ resv_space);
+
+ guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
+
+ return 0;
+}
+
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
+{
+ GEM_BUG_ON(ct->enabled);
+
+ tasklet_kill(&ct->receive_tasklet);
+ i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
+ memset(ct, 0, sizeof(*ct));
+}
+
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ u32 base, desc, cmds, size;
+ void *blob;
+ int err;
+
+ GEM_BUG_ON(ct->enabled);
+
+ /* vma should be already allocated and mapped */
+ GEM_BUG_ON(!ct->vma);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
+ base = intel_guc_ggtt_offset(guc, ct->vma);
+
+ /* blob should start with send descriptor */
+ blob = __px_vaddr(ct->vma->obj);
+ GEM_BUG_ON(blob != ct->ctbs.send.desc);
+
+ /* (re)initialize descriptors */
+ guc_ct_buffer_reset(&ct->ctbs.send);
+ guc_ct_buffer_reset(&ct->ctbs.recv);
+
+ /*
+ * Register both CT buffers, starting with the RECV buffer.
+ * Descriptors are in the first half of the blob.
+ */
+ desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
+ cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
+ size = ct->ctbs.recv.size * 4;
+ err = ct_register_buffer(ct, false, desc, cmds, size);
+ if (unlikely(err))
+ goto err_out;
+
+ desc = base + ptrdiff(ct->ctbs.send.desc, blob);
+ cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
+ size = ct->ctbs.send.size * 4;
+ err = ct_register_buffer(ct, true, desc, cmds, size);
+ if (unlikely(err))
+ goto err_out;
+
+ err = ct_control_enable(ct, true);
+ if (unlikely(err))
+ goto err_out;
+
+ ct->enabled = true;
+ ct->stall_time = KTIME_MAX;
+
+ return 0;
+
+err_out:
+ CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
+ return err;
+}
+
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+
+ GEM_BUG_ON(!ct->enabled);
+
+ ct->enabled = false;
+
+ if (intel_guc_is_fw_running(guc))
+ ct_control_enable(ct, false);
+}
+
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
+{
+ /* For now it's trivial */
+ return ++ct->requests.last_fence;
+}
+
+static int ct_write(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len /* in dwords */,
+ u32 fence, u32 flags)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 tail = ctb->tail;
+ u32 size = ctb->size;
+ u32 header;
+ u32 hxg;
+ u32 type;
+ u32 *cmds = ctb->cmds;
+ unsigned int i;
+
+ if (unlikely(desc->status))
+ goto corrupted;
+
+ GEM_BUG_ON(tail > size);
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+ if (unlikely(tail != READ_ONCE(desc->tail))) {
+ CT_ERROR(ct, "Tail was modified %u != %u\n",
+ desc->tail, tail);
+ desc->status |= GUC_CTB_STATUS_MISMATCH;
+ goto corrupted;
+ }
+ if (unlikely(READ_ONCE(desc->head) >= size)) {
+ CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
+ desc->head, size);
+ desc->status |= GUC_CTB_STATUS_OVERFLOW;
+ goto corrupted;
+ }
+#endif
+
+ /*
+ * dw0: CT header (including fence)
+ * dw1: HXG header (including action code)
+ * dw2+: action data
+ */
+ header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
+ FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
+ FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
+
+ type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
+ GUC_HXG_TYPE_REQUEST;
+ hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+ GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+
+ CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
+ tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
+
+ cmds[tail] = header;
+ tail = (tail + 1) % size;
+
+ cmds[tail] = hxg;
+ tail = (tail + 1) % size;
+
+ for (i = 1; i < len; i++) {
+ cmds[tail] = action[i];
+ tail = (tail + 1) % size;
+ }
+ GEM_BUG_ON(tail > size);
+
+ /*
+ * make sure the H2G buffer update and LRC tail update (if this is triggering a
+ * submission) are visible before updating the descriptor tail
+ */
+ intel_guc_write_barrier(ct_to_guc(ct));
+
+ /* update local copies */
+ ctb->tail = tail;
+ GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
+ atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
+
+ /* now update descriptor */
+ WRITE_ONCE(desc->tail, tail);
+
+ return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
+ desc->head, desc->tail, desc->status);
+ ctb->broken = true;
+ return -EPIPE;
+}
+
+/**
+ * wait_for_ct_request_update - Wait for CT request state update.
+ * @ct: pointer to CT
+ * @req: pointer to pending request
+ * @status: placeholder for status
+ *
+ * For each sent request, the GuC shall send back a CT response message.
+ * Our message handler will update the status of the tracked request once
+ * a response message with the given fence is received. Wait here and
+ * check for a valid response status value.
+ *
+ * Return:
+ * * 0 response received (status is valid)
+ * * -ETIMEDOUT no response within hardcoded timeout
+ */
+static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
+{
+ int err;
+ bool ct_enabled;
+
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No GuC command should ever take longer than 10ms but many GuC
+ * commands can be in flight at a time, so use a 1s timeout on the slower
+ * sleep-wait loop.
+ */
+#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
+#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
+#define done \
+ (!(ct_enabled = intel_guc_ct_enabled(ct)) || \
+ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+ GUC_HXG_ORIGIN_GUC)
+ err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
+ if (err)
+ err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
+#undef done
+ if (!ct_enabled)
+ err = -ENODEV;
+
+ *status = req->status;
+ return err;
+}
+
+#define GUC_CTB_TIMEOUT_MS 1500
+static inline bool ct_deadlocked(struct intel_guc_ct *ct)
+{
+ long timeout = GUC_CTB_TIMEOUT_MS;
+ bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
+
+ if (unlikely(ret)) {
+ struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
+ struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
+
+ CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
+ ktime_ms_delta(ktime_get(), ct->stall_time),
+ send->status, recv->status);
+ CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.send.space) * 4);
+ CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
+ CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
+ CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
+ atomic_read(&ct->ctbs.recv.space) * 4);
+ CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
+ CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
+
+ ct->ctbs.send.broken = true;
+ }
+
+ return ret;
+}
+
+static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
+
+ /*
+ * We leave a certain amount of space in the G2H CTB buffer for
+ * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
+ */
+ return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
+}
+
+static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ lockdep_assert_held(&ct->ctbs.send.lock);
+
+ GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
+
+ if (g2h_len_dw)
+ atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
+static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
+static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 head;
+ u32 space;
+
+ if (atomic_read(&ctb->space) >= len_dw)
+ return true;
+
+ head = READ_ONCE(desc->head);
+ if (unlikely(head > ctb->size)) {
+ CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
+ head, ctb->size);
+ desc->status |= GUC_CTB_STATUS_OVERFLOW;
+ ctb->broken = true;
+ return false;
+ }
+
+ space = CIRC_SPACE(ctb->tail, head, ctb->size);
+ atomic_set(&ctb->space, space);
+
+ return space >= len_dw;
+}
+
+static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
+{
+ bool h2g = h2g_has_room(ct, h2g_dw);
+ bool g2h = g2h_has_room(ct, g2h_dw);
+
+ lockdep_assert_held(&ct->ctbs.send.lock);
+
+ if (unlikely(!h2g || !g2h)) {
+ if (ct->stall_time == KTIME_MAX)
+ ct->stall_time = ktime_get();
+
+ /* Be paranoid and kick G2H tasklet to free credits */
+ if (!g2h)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+
+ if (unlikely(ct_deadlocked(ct)))
+ return -EPIPE;
+ else
+ return -EBUSY;
+ }
+
+ ct->stall_time = KTIME_MAX;
+ return 0;
+}
+
+#define G2H_LEN_DW(f) ({ \
+ typeof(f) f_ = (f); \
+ FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
+ FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
+ GUC_CTB_HXG_MSG_MIN_LEN : 0; \
+})
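+/*
+ * Informal example: a non-blocking send built with MAKE_SEND_FLAGS(2) (see
+ * intel_guc_ct.h) carries 2 in INTEL_GUC_CT_SEND_G2H_DW_MASK, so G2H_LEN_DW()
+ * yields 2 + GUC_CTB_HXG_MSG_MIN_LEN dwords and ct_send_nb() below reserves
+ * that many dwords of G2H credit when it writes the H2G message; the credit
+ * is released once the corresponding G2H message is handled (see
+ * ct_handle_event()).
+ */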
+static int ct_send_nb(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len,
+ u32 flags)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+ unsigned long spin_flags;
+ u32 g2h_len_dw = G2H_LEN_DW(flags);
+ u32 fence;
+ int ret;
+
+ spin_lock_irqsave(&ctb->lock, spin_flags);
+
+ ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
+ if (unlikely(ret))
+ goto out;
+
+ fence = ct_get_next_fence(ct);
+ ret = ct_write(ct, action, len, fence, flags);
+ if (unlikely(ret))
+ goto out;
+
+ g2h_reserve_space(ct, g2h_len_dw);
+ intel_guc_notify(ct_to_guc(ct));
+
+out:
+ spin_unlock_irqrestore(&ctb->lock, spin_flags);
+
+ return ret;
+}
+
+static int ct_send(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len,
+ u32 *response_buf,
+ u32 response_buf_size,
+ u32 *status)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+ struct ct_request request;
+ unsigned long flags;
+ unsigned int sleep_period_ms = 1;
+ bool send_again;
+ u32 fence;
+ int err;
+
+ GEM_BUG_ON(!ct->enabled);
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
+ GEM_BUG_ON(!response_buf && response_buf_size);
+ might_sleep();
+
+resend:
+ send_again = false;
+
+ /*
+ * We use a lazy spin wait loop here as we believe that if the CT
+ * buffers are sized correctly the flow control condition should be
+ * rare. We reserve the maximum size in the G2H credits as we don't know
+ * how big the response is going to be.
+ */
+retry:
+ spin_lock_irqsave(&ctb->lock, flags);
+ if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
+ !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
+ if (ct->stall_time == KTIME_MAX)
+ ct->stall_time = ktime_get();
+ spin_unlock_irqrestore(&ctb->lock, flags);
+
+ if (unlikely(ct_deadlocked(ct)))
+ return -EPIPE;
+
+ if (msleep_interruptible(sleep_period_ms))
+ return -EINTR;
+ sleep_period_ms = sleep_period_ms << 1;
+
+ goto retry;
+ }
+
+ ct->stall_time = KTIME_MAX;
+
+ fence = ct_get_next_fence(ct);
+ request.fence = fence;
+ request.status = 0;
+ request.response_len = response_buf_size;
+ request.response_buf = response_buf;
+
+ spin_lock(&ct->requests.lock);
+ list_add_tail(&request.link, &ct->requests.pending);
+ spin_unlock(&ct->requests.lock);
+
+ err = ct_write(ct, action, len, fence, 0);
+ g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+
+ spin_unlock_irqrestore(&ctb->lock, flags);
+
+ if (unlikely(err))
+ goto unlink;
+
+ intel_guc_notify(ct_to_guc(ct));
+
+ err = wait_for_ct_request_update(ct, &request, status);
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+ if (unlikely(err)) {
+ if (err == -ENODEV)
+ /*
+ * wait_for_ct_request_update returns -ENODEV on reset/suspend in
+ * progress. In this case, output is debug rather than error info.
+ */
+ CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
+ action[0], request.fence);
+ else
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
+ goto unlink;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
+ FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
+ send_again = true;
+ goto unlink;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
+ err = -EIO;
+ goto unlink;
+ }
+
+ if (response_buf) {
+ /* There shall be no data in the status */
+ WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
+ /* Return actual response len */
+ err = request.response_len;
+ } else {
+ /* There shall be no response payload */
+ WARN_ON(request.response_len);
+ /* Return data decoded from the status dword */
+ err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
+ }
+
+unlink:
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_del(&request.link);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
+
+ if (unlikely(send_again))
+ goto resend;
+
+ return err;
+}
+
+/*
+ * Command Transport (CT) buffer based GuC send function.
+ */
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size, u32 flags)
+{
+ u32 status = ~0; /* undefined */
+ int ret;
+
+ if (unlikely(!ct->enabled)) {
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+
+ WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+ }
+
+ if (unlikely(ct->ctbs.send.broken))
+ return -EPIPE;
+
+ if (flags & INTEL_GUC_CT_SEND_NB)
+ return ct_send_nb(ct, action, len, flags);
+
+ ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
+ if (unlikely(ret < 0)) {
+ if (ret != -ENODEV)
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
+ } else if (unlikely(ret)) {
+ CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
+ action[0], ret, ret);
+ }
+
+ return ret;
+}
+
+static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
+{
+ struct ct_incoming_msg *msg;
+
+ msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
+ if (msg)
+ msg->size = num_dwords;
+ return msg;
+}
+
+static void ct_free_msg(struct ct_incoming_msg *msg)
+{
+ kfree(msg);
+}
+
+/*
+ * Return: number of remaining dwords available to read (0 if empty)
+ * or a negative error code on failure
+ */
+static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 head = ctb->head;
+ u32 tail = READ_ONCE(desc->tail);
+ u32 size = ctb->size;
+ u32 *cmds = ctb->cmds;
+ s32 available;
+ unsigned int len;
+ unsigned int i;
+ u32 header;
+
+ if (unlikely(ctb->broken))
+ return -EPIPE;
+
+ if (unlikely(desc->status)) {
+ u32 status = desc->status;
+
+ if (status & GUC_CTB_STATUS_UNUSED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset, but this should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
+ status &= ~GUC_CTB_STATUS_UNUSED;
+ }
+
+ if (status)
+ goto corrupted;
+ }
+
+ GEM_BUG_ON(head > size);
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+ if (unlikely(head != READ_ONCE(desc->head))) {
+ CT_ERROR(ct, "Head was modified %u != %u\n",
+ desc->head, head);
+ desc->status |= GUC_CTB_STATUS_MISMATCH;
+ goto corrupted;
+ }
+#endif
+ if (unlikely(tail >= size)) {
+ CT_ERROR(ct, "Invalid tail offset %u >= %u)\n",
+ tail, size);
+ desc->status |= GUC_CTB_STATUS_OVERFLOW;
+ goto corrupted;
+ }
+
+ /* tail == head condition indicates empty */
+ available = tail - head;
+ if (unlikely(available == 0)) {
+ *msg = NULL;
+ return 0;
+ }
+
+ /* beware of buffer wrap case */
+ if (unlikely(available < 0))
+ available += size;
+ CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
+ GEM_BUG_ON(available < 0);
+
+ header = cmds[head];
+ head = (head + 1) % size;
+
+ /* message len with header */
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
+ if (unlikely(len > (u32)available)) {
+ CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
+ 4, &header,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ desc->status |= GUC_CTB_STATUS_UNDERFLOW;
+ goto corrupted;
+ }
+
+ *msg = ct_alloc_msg(len);
+ if (!*msg) {
+ CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
+ 4, &header,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ return available;
+ }
+
+ (*msg)->msg[0] = header;
+
+ for (i = 1; i < len; i++) {
+ (*msg)->msg[i] = cmds[head];
+ head = (head + 1) % size;
+ }
+ CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
+
+ /* update local copies */
+ ctb->head = head;
+
+ /* now update descriptor */
+ WRITE_ONCE(desc->head, head);
+
+ return available - len;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
+ desc->head, desc->tail, desc->status);
+ ctb->broken = true;
+ return -EPIPE;
+}
+
+static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
+{
+ u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
+ u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
+ const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
+ const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
+ u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
+ struct ct_request *req;
+ unsigned long flags;
+ bool found = false;
+ int err = 0;
+
+ GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
+ GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
+ GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
+ FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
+
+ CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
+
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_for_each_entry(req, &ct->requests.pending, link) {
+ if (unlikely(fence != req->fence)) {
+ CT_DEBUG(ct, "request %u awaits response\n",
+ req->fence);
+ continue;
+ }
+ if (unlikely(datalen > req->response_len)) {
+ CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
+ req->fence, datalen, req->response_len);
+ datalen = min(datalen, req->response_len);
+ err = -EMSGSIZE;
+ }
+ if (datalen)
+ memcpy(req->response_buf, data, 4 * datalen);
+ req->response_len = datalen;
+ WRITE_ONCE(req->status, hxg[0]);
+ found = true;
+ break;
+ }
+ if (!found) {
+ CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
+ CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
+ ct->requests.last_fence);
+ list_for_each_entry(req, &ct->requests.pending, link)
+ CT_ERROR(ct, "request %u awaits response\n",
+ req->fence);
+ err = -ENOKEY;
+ }
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
+
+ if (unlikely(err))
+ return err;
+
+ ct_free_msg(response);
+ return 0;
+}
+
+static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ const u32 *hxg;
+ const u32 *payload;
+ u32 hxg_len, action, len;
+ int ret;
+
+ hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+ hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
+ payload = &hxg[GUC_HXG_MSG_MIN_LEN];
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ len = hxg_len - GUC_HXG_MSG_MIN_LEN;
+
+ CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
+
+ switch (action) {
+ case INTEL_GUC_ACTION_DEFAULT:
+ ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
+ break;
+ case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ ret = intel_guc_deregister_done_process_msg(guc, payload,
+ len);
+ break;
+ case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ ret = intel_guc_sched_done_process_msg(guc, payload, len);
+ break;
+ case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
+ ret = intel_guc_context_reset_process_msg(guc, payload, len);
+ break;
+ case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = intel_guc_error_capture_process_msg(guc, payload, len);
+ if (unlikely(ret))
+ CT_ERROR(ct, "error capture notification failed %x %*ph\n",
+ action, 4 * len, payload);
+ break;
+ case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
+ ret = intel_guc_engine_failure_process_msg(guc, payload, len);
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+ intel_guc_log_handle_flush_event(&guc->log);
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ CT_ERROR(ct, "Received GuC crash dump notification!\n");
+ ret = 0;
+ break;
+ case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
+ CT_ERROR(ct, "Received GuC exception notification!\n");
+ ret = 0;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (unlikely(ret)) {
+ CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
+ action, ERR_PTR(ret));
+ return ret;
+ }
+
+ ct_free_msg(request);
+ return 0;
+}
+
+static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
+{
+ unsigned long flags;
+ struct ct_incoming_msg *request;
+ bool done;
+ int err;
+
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ request = list_first_entry_or_null(&ct->requests.incoming,
+ struct ct_incoming_msg, link);
+ if (request)
+ list_del(&request->link);
+ done = !!list_empty(&ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
+
+ if (!request)
+ return true;
+
+ err = ct_process_request(ct, request);
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+ ERR_PTR(err), 4 * request->size, request->msg);
+ ct_free_msg(request);
+ }
+
+ return done;
+}
+
+static void ct_incoming_request_worker_func(struct work_struct *w)
+{
+ struct intel_guc_ct *ct =
+ container_of(w, struct intel_guc_ct, requests.worker);
+ bool done;
+
+ do {
+ done = ct_process_incoming_requests(ct);
+ } while (!done);
+}
+
+static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
+{
+ const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+ unsigned long flags;
+
+ GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
+
+ /*
+ * Adjusting the space must be done in IRQ context or a deadlock can occur,
+ * as the CTB processing in the workqueue below can send CTBs, which creates
+ * a circular dependency if the space was returned there.
+ */
+ switch (action) {
+ case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ g2h_release_space(ct, request->size);
+ }
+
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request->link, &ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
+
+ queue_work(system_unbound_wq, &ct->requests.worker);
+ return 0;
+}
+
+static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+ u32 origin, type;
+ u32 *hxg;
+ int err;
+
+ if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
+ return -EBADMSG;
+
+ hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
+
+ origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
+ if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
+ err = -EPROTO;
+ goto failed;
+ }
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
+ switch (type) {
+ case GUC_HXG_TYPE_EVENT:
+ err = ct_handle_event(ct, msg);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ err = ct_handle_response(ct, msg);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (unlikely(err)) {
+failed:
+ CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
+ ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
+ }
+ return err;
+}
+
+static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+ u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
+ int err;
+
+ if (format == GUC_CTB_FORMAT_HXG)
+ err = ct_handle_hxg(ct, msg);
+ else
+ err = -EOPNOTSUPP;
+
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
+ ERR_PTR(err), 4 * msg->size, msg->msg);
+ ct_free_msg(msg);
+ }
+}
+
+/*
+ * Return: number of remaining dwords available to read (0 if empty)
+ * or a negative error code on failure
+ */
+static int ct_receive(struct intel_guc_ct *ct)
+{
+ struct ct_incoming_msg *msg = NULL;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
+ ret = ct_read(ct, &msg);
+ spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
+ if (ret < 0)
+ return ret;
+
+ if (msg)
+ ct_handle_msg(ct, msg);
+
+ return ret;
+}
+
+static void ct_try_receive_message(struct intel_guc_ct *ct)
+{
+ int ret;
+
+ if (GEM_WARN_ON(!ct->enabled))
+ return;
+
+ ret = ct_receive(ct);
+ if (ret > 0)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+}
+
+static void ct_receive_tasklet_func(struct tasklet_struct *t)
+{
+ struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
+
+ ct_try_receive_message(ct);
+}
+
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
+{
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected GuC event received while CT disabled!\n");
+ return;
+ }
+
+ ct_try_receive_message(ct);
+}
+
+void intel_guc_ct_print_info(struct intel_guc_ct *ct,
+ struct drm_printer *p)
+{
+ drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));
+
+ if (!ct->enabled)
+ return;
+
+ drm_printf(p, "H2G Space: %u\n",
+ atomic_read(&ct->ctbs.send.space) * 4);
+ drm_printf(p, "Head: %u\n",
+ ct->ctbs.send.desc->head);
+ drm_printf(p, "Tail: %u\n",
+ ct->ctbs.send.desc->tail);
+ drm_printf(p, "G2H Space: %u\n",
+ atomic_read(&ct->ctbs.recv.space) * 4);
+ drm_printf(p, "Head: %u\n",
+ ct->ctbs.recv.desc->head);
+ drm_printf(p, "Tail: %u\n",
+ ct->ctbs.recv.desc->tail);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
new file mode 100644
index 000000000..f709a19c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CT_H_
+#define _INTEL_GUC_CT_H_
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/wait.h>
+
+#include "intel_guc_fwif.h"
+
+struct i915_vma;
+struct intel_guc;
+struct drm_printer;
+
+/**
+ * DOC: Command Transport (CT).
+ *
+ * Buffer based command transport is a replacement for the MMIO based mechanism.
+ * It can be used to perform both host-to-guc and guc-to-host communication.
+ */
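+
+/*
+ * Typical lifecycle (informal sketch; the actual call sites live in the
+ * GuC/uc init, enable and fini paths):
+ *
+ *     intel_guc_ct_init_early(ct);
+ *     err = intel_guc_ct_init(ct);     (allocate and map the CTB blob)
+ *     err = intel_guc_ct_enable(ct);   (register both CTBs with the GuC)
+ *     err = intel_guc_ct_send(ct, action, len, resp_buf, resp_size, 0);
+ *     intel_guc_ct_disable(ct);
+ *     intel_guc_ct_fini(ct);
+ */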
+
+/** Represents a single command transport buffer.
+ *
+ * A single command transport buffer consists of two parts, the header
+ * record (command transport buffer descriptor) and the actual buffer which
+ * holds the commands.
+ *
+ * @lock: protects access to the commands buffer and buffer descriptor
+ * @desc: pointer to the buffer descriptor
+ * @cmds: pointer to the commands buffer
+ * @size: size of the commands buffer in dwords
+ * @resv_space: reserved space in buffer in dwords
+ * @head: local shadow copy of head in dwords
+ * @tail: local shadow copy of tail in dwords
+ * @space: local shadow copy of space in dwords
+ * @broken: flag to indicate if descriptor data is broken
+ */
+struct intel_guc_ct_buffer {
+ spinlock_t lock;
+ struct guc_ct_buffer_desc *desc;
+ u32 *cmds;
+ u32 size;
+ u32 resv_space;
+ u32 tail;
+ u32 head;
+ atomic_t space;
+ bool broken;
+};
+
+/** Top-level structure for Command Transport related data
+ *
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
+ */
+struct intel_guc_ct {
+ struct i915_vma *vma;
+ bool enabled;
+
+ /* buffers for sending and receiving commands */
+ struct {
+ struct intel_guc_ct_buffer send;
+ struct intel_guc_ct_buffer recv;
+ } ctbs;
+
+ struct tasklet_struct receive_tasklet;
+
+ /** @wq: wait queue for g2h channel */
+ wait_queue_head_t wq;
+
+ struct {
+ u16 last_fence; /* last fence used to send request */
+
+ spinlock_t lock; /* protects pending requests list */
+ struct list_head pending; /* requests waiting for response */
+
+ struct list_head incoming; /* incoming requests */
+ struct work_struct worker; /* handler for incoming requests */
+ } requests;
+
+ /** @stall_time: time at which a CTB submission first stalled */
+ ktime_t stall_time;
+};
+
+void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
+int intel_guc_ct_enable(struct intel_guc_ct *ct);
+void intel_guc_ct_disable(struct intel_guc_ct *ct);
+
+static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
+{
+ ct->enabled = false;
+}
+
+static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
+{
+ return ct->enabled;
+}
+
+#define INTEL_GUC_CT_SEND_NB BIT(31)
+#define INTEL_GUC_CT_SEND_G2H_DW_SHIFT 0
+#define INTEL_GUC_CT_SEND_G2H_DW_MASK (0xff << INTEL_GUC_CT_SEND_G2H_DW_SHIFT)
+#define MAKE_SEND_FLAGS(len) ({ \
+ typeof(len) len_ = (len); \
+ GEM_BUG_ON(!FIELD_FIT(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_)); \
+ (FIELD_PREP(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_) | INTEL_GUC_CT_SEND_NB); \
+})
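+/*
+ * Informal usage sketch for a non-blocking send that expects a G2H
+ * completion (the action contents shown are illustrative only):
+ *
+ *     u32 action[] = { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, ... };
+ *     u32 flags = MAKE_SEND_FLAGS(G2H_LEN_DW_SCHED_CONTEXT_MODE_SET);
+ *
+ *     ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0, flags);
+ *
+ * INTEL_GUC_CT_SEND_NB makes the call return without waiting for a response,
+ * while the encoded dword count is reserved from the G2H credits until the
+ * matching G2H message has been processed.
+ */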
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size, u32 flags);
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
+
+void intel_guc_ct_print_info(struct intel_guc_ct *ct, struct drm_printer *p);
+
+#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
new file mode 100644
index 000000000..25f09a420
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "gt/uc/intel_guc_ads.h"
+#include "gt/uc/intel_guc_ct.h"
+#include "gt/uc/intel_guc_slpc.h"
+#include "gt/uc/intel_guc_submission.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_guc *guc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ intel_guc_load_status(guc, &p);
+ drm_puts(&p, "\n");
+ intel_guc_log_info(&guc->log, &p);
+
+ if (!intel_guc_submission_is_used(guc))
+ return 0;
+
+ intel_guc_ct_print_info(&guc->ct, &p);
+ intel_guc_submission_print_info(guc, &p);
+ intel_guc_ads_print_policy_info(guc, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+static int guc_registered_contexts_show(struct seq_file *m, void *data)
+{
+ struct intel_guc *guc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_submission_is_used(guc))
+ return -ENODEV;
+
+ intel_guc_submission_print_context_info(guc, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
+
+static int guc_slpc_info_show(struct seq_file *m, void *unused)
+{
+ struct intel_guc *guc = m->private;
+ struct intel_guc_slpc *slpc = &guc->slpc;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_guc_slpc_is_used(guc))
+ return -ENODEV;
+
+ return intel_guc_slpc_print_info(slpc, &p);
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
+
+static bool intel_eval_slpc_support(void *data)
+{
+ struct intel_guc *guc = (struct intel_guc *)data;
+
+ return intel_guc_slpc_is_used(guc);
+}
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "guc_info", &guc_info_fops, NULL },
+ { "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
+ { "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
+ };
+
+ if (!intel_guc_is_supported(guc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+ intel_guc_log_debugfs_register(&guc->log, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
new file mode 100644
index 000000000..424c26665
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
new file mode 100644
index 000000000..a0372735c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ *
+ * Authors:
+ * Vinit Azad <vinit.azad@intel.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ * Dave Gordon <david.s.gordon@intel.com>
+ * Alex Dai <yu.dai@intel.com>
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
+
+static void guc_prepare_xfer(struct intel_uncore *uncore)
+{
+ u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
+
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_MIA_CACHING;
+
+ /* Must program this register before loading the ucode with DMA */
+ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
+
+ if (IS_GEN9_LP(uncore->i915))
+ intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (GRAPHICS_VER(uncore->i915) == 9) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
+static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ size_t copied;
+ int i;
+
+ copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
+}
+
+static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(0),
+ intel_guc_ggtt_offset(guc, guc_fw->rsa_data));
+
+ return 0;
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ if (guc_fw->rsa_data)
+ return guc_xfer_rsa_vma(guc_fw, uncore);
+ else
+ return guc_xfer_rsa_mmio(guc_fw, uncore);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the uKernel status field reports that the GuC boot process has
+ * completed (INTEL_GUC_LOAD_STATUS_READY).
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
+{
+ u32 val = intel_uncore_read(uncore, GUC_STATUS);
+ u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
+
+ *status = val;
+ return uk_val == INTEL_GUC_LOAD_STATUS_READY;
+}
+
+static int guc_wait_ucode(struct intel_uncore *uncore)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Wait for the GuC to start up.
+ * NB: Docs recommend not using the interrupt for completion.
+ * Measurements indicate this should take no more than 20ms
+ * (assuming the GT clock is at maximum frequency). So, a
+ * timeout here indicates that the GuC has failed and is unusable.
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
+ *
+ * FIXME: There is a known (but exceedingly unlikely) race condition
+ * where the asynchronous frequency management code could reduce
+ * the GT clock while a GuC reload is in progress (during a full
+ * GT reset). A fix is in progress but there are complex locking
+ * issues to be resolved. In the meantime bump the timeout to
+ * 200ms. Even at slowest clock, this should be sufficient. And
+ * in the working case, a larger timeout makes no difference.
+ */
+ ret = wait_for(guc_ready(uncore, &status), 200);
+ if (ret) {
+ struct drm_device *drm = &uncore->i915->drm;
+
+ drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_info(drm, "GuC load failed: status: Reset = %d, "
+ "BootROM = 0x%02X, UKernel = 0x%02X, "
+ "MIA = 0x%02X, Auth = 0x%02X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status),
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+ drm_info(drm, "GuC firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ }
+
+ if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
+ drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ ret = -ENXIO;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * intel_guc_fw_upload() - load GuC uCode to device
+ * @guc: intel_guc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_fw_upload(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ guc_prepare_xfer(uncore);
+
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded separately, either by copying it to the UOS_RSA_SCRATCH
+ * register (if key size <= 256) or through a ggtt-pinned vma (if key
+ * size > 256). The RSA size and therefore the way we provide it to the
+ * HW is fixed for each platform and hard-coded in the bootrom.
+ */
+ ret = guc_xfer_rsa(&guc->fw, uncore);
+ if (ret)
+ goto out;
+
+ /*
+ * Current uCode expects the code to be loaded at 8k; locations below
+ * this are used for the stack.
+ */
+ ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
+ if (ret)
+ goto out;
+
+ ret = guc_wait_ucode(uncore);
+ if (ret)
+ goto out;
+
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ return 0;
+
+out:
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
new file mode 100644
index 000000000..0b4d2a9c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
new file mode 100644
index 000000000..502e7cb5a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include "gt/intel_engine_types.h"
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
+#include "abi/guc_errors_abi.h"
+#include "abi/guc_communication_mmio_abi.h"
+#include "abi/guc_communication_ctb_abi.h"
+#include "abi/guc_klvs_abi.h"
+#include "abi/guc_messages_abi.h"
+
+/* Payload length only, i.e. it doesn't include the G2H header length */
+#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2
+#define G2H_LEN_DW_DEREGISTER_CONTEXT 1
+
+#define GUC_CONTEXT_DISABLE 0
+#define GUC_CONTEXT_ENABLE 1
+
+#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
+#define GUC_CLIENT_PRIORITY_HIGH 1
+#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
+#define GUC_CLIENT_PRIORITY_NORMAL 3
+#define GUC_CLIENT_PRIORITY_NUM 4
+
+#define GUC_MAX_CONTEXT_ID 65535
+#define GUC_INVALID_CONTEXT_ID GUC_MAX_CONTEXT_ID
+
+#define GUC_RENDER_ENGINE 0
+#define GUC_VIDEO_ENGINE 1
+#define GUC_BLITTER_ENGINE 2
+#define GUC_VIDEOENHANCE_ENGINE 3
+#define GUC_VIDEO_ENGINE2 4
+#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+
+#define GUC_RENDER_CLASS 0
+#define GUC_VIDEO_CLASS 1
+#define GUC_VIDEOENHANCE_CLASS 2
+#define GUC_BLITTER_CLASS 3
+#define GUC_COMPUTE_CLASS 4
+#define GUC_LAST_ENGINE_CLASS GUC_COMPUTE_CLASS
+#define GUC_MAX_ENGINE_CLASSES 16
+#define GUC_MAX_INSTANCES_PER_CLASS 32
+
+#define GUC_DOORBELL_INVALID 256
+
+/*
+ * Work queue item header definitions
+ *
+ * The work queue is a circular buffer used to submit complex (multi-lrc)
+ * submissions to the GuC. A work queue item is an entry in that circular buffer.
+ */
+#define WQ_STATUS_ACTIVE 1
+#define WQ_STATUS_SUSPENDED 2
+#define WQ_STATUS_CMD_ERROR 3
+#define WQ_STATUS_ENGINE_ID_NOT_USED 4
+#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_TYPE_BATCH_BUF 0x1
+#define WQ_TYPE_PSEUDO 0x2
+#define WQ_TYPE_INORDER 0x3
+#define WQ_TYPE_NOOP 0x4
+#define WQ_TYPE_MULTI_LRC 0x5
+#define WQ_TYPE_MASK GENMASK(7, 0)
+#define WQ_LEN_MASK GENMASK(26, 16)
+
+#define WQ_GUC_ID_MASK GENMASK(15, 0)
+#define WQ_RING_TAIL_MASK GENMASK(28, 18)
+
+#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
+#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
+#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
+#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3)
+#define GUC_STAGE_DESC_ATTR_RESET BIT(4)
+#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5)
+#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
+#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
+
+#define GUC_CTL_LOG_PARAMS 0
+#define GUC_LOG_VALID BIT(0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
+#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
+#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
+#define GUC_LOG_CRASH_SHIFT 4
+#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
+#define GUC_LOG_DEBUG_SHIFT 6
+#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
+#define GUC_LOG_CAPTURE_SHIFT 10
+#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
+#define GUC_LOG_BUF_ADDR_SHIFT 12
+
+#define GUC_CTL_WA 1
+#define GUC_WA_GAM_CREDITS BIT(10)
+#define GUC_WA_DUAL_QUEUE BIT(11)
+#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
+#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
+
+#define GUC_CTL_FEATURE 2
+#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
+
+#define GUC_CTL_DEBUG 3
+#define GUC_LOG_VERBOSITY_SHIFT 0
+#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY_MAX 3
+#define GUC_LOG_VERBOSITY_MASK 0x0000000f
+#define GUC_LOG_DESTINATION_MASK (3 << 4)
+#define GUC_LOG_DISABLED (1 << 6)
+#define GUC_PROFILE_ENABLED (1 << 7)
+
+#define GUC_CTL_ADS 4
+#define GUC_ADS_ADDR_SHIFT 1
+#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+
+#define GUC_CTL_DEVID 5
+
+#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
+
+/* Generic GT SysInfo data types */
+#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0
+#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1
+#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI 2
+#define GUC_GENERIC_GT_SYSINFO_MAX 16
+
+/*
+ * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6].
+ * Bit 7 can be used for operations that apply to all engine classes & instances.
+ */
+#define GUC_ENGINE_CLASS_SHIFT 0
+#define GUC_ENGINE_CLASS_MASK (0x7 << GUC_ENGINE_CLASS_SHIFT)
+#define GUC_ENGINE_INSTANCE_SHIFT 3
+#define GUC_ENGINE_INSTANCE_MASK (0xf << GUC_ENGINE_INSTANCE_SHIFT)
+#define GUC_ENGINE_ALL_INSTANCES BIT(7)
+
+#define MAKE_GUC_ID(class, instance) \
+ (((class) << GUC_ENGINE_CLASS_SHIFT) | \
+ ((instance) << GUC_ENGINE_INSTANCE_SHIFT))
+
+#define GUC_ID_TO_ENGINE_CLASS(guc_id) \
+ (((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
+#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
+ (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)
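+
+/*
+ * For illustration (derived from the definitions above): MAKE_GUC_ID(GUC_VIDEO_CLASS, 2)
+ * evaluates to (1 << 0) | (2 << 3) == 0x11, which decodes back via
+ * GUC_ID_TO_ENGINE_CLASS(0x11) == GUC_VIDEO_CLASS and
+ * GUC_ID_TO_ENGINE_INSTANCE(0x11) == 2.
+ */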
+
+#define SLPC_EVENT(id, c) (\
+FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
+FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \
+)
+
+/* the GuC arrays don't include OTHER_CLASS */
+static u8 engine_class_guc_class_map[] = {
+ [RENDER_CLASS] = GUC_RENDER_CLASS,
+ [COPY_ENGINE_CLASS] = GUC_BLITTER_CLASS,
+ [VIDEO_DECODE_CLASS] = GUC_VIDEO_CLASS,
+ [VIDEO_ENHANCEMENT_CLASS] = GUC_VIDEOENHANCE_CLASS,
+ [COMPUTE_CLASS] = GUC_COMPUTE_CLASS,
+};
+
+static u8 guc_class_engine_class_map[] = {
+ [GUC_RENDER_CLASS] = RENDER_CLASS,
+ [GUC_BLITTER_CLASS] = COPY_ENGINE_CLASS,
+ [GUC_VIDEO_CLASS] = VIDEO_DECODE_CLASS,
+ [GUC_VIDEOENHANCE_CLASS] = VIDEO_ENHANCEMENT_CLASS,
+ [GUC_COMPUTE_CLASS] = COMPUTE_CLASS,
+};
+
+static inline u8 engine_class_to_guc_class(u8 class)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(engine_class_guc_class_map) != MAX_ENGINE_CLASS + 1);
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);
+
+ return engine_class_guc_class_map[class];
+}
+
+static inline u8 guc_class_to_engine_class(u8 guc_class)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(guc_class_engine_class_map) != GUC_LAST_ENGINE_CLASS + 1);
+ GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);
+
+ return guc_class_engine_class_map[guc_class];
+}
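+
+/*
+ * For illustration, the two tables above are inverses of each other for the
+ * classes they cover: engine_class_to_guc_class(COPY_ENGINE_CLASS) returns
+ * GUC_BLITTER_CLASS (3), and guc_class_to_engine_class(GUC_BLITTER_CLASS)
+ * maps back to COPY_ENGINE_CLASS. OTHER_CLASS has no GuC equivalent and is
+ * rejected by the GEM_BUG_ON above.
+ */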
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+ u32 header;
+ u32 context_desc;
+ u32 submit_element_info;
+ u32 fence_id;
+} __packed;
+
+struct guc_process_desc_v69 {
+ u32 stage_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[36];
+} __packed;
+
+struct guc_sched_wq_desc {
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u32 wq_status;
+ u32 reserved[28];
+} __packed;
+
+/* Helper for context registration H2G */
+struct guc_ctxt_registration_info {
+ u32 flags;
+ u32 context_idx;
+ u32 engine_class;
+ u32 engine_submit_mask;
+ u32 wq_desc_lo;
+ u32 wq_desc_hi;
+ u32 wq_base_lo;
+ u32 wq_base_hi;
+ u32 wq_size;
+ u32 hwlrca_lo;
+ u32 hwlrca_hi;
+};
+#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+ u32 hw_context_desc;
+ u32 slpm_perf_mode_hint; /* SLPC v1 only */
+ u32 slpm_freq_hint;
+ u32 engine_submit_mask; /* In logical space */
+ u8 engine_class;
+ u8 reserved0[3];
+ u32 priority;
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+ u32 context_flags; /* CONTEXT_REGISTRATION_* */
+ /* Time for one workload to execute. (in micro seconds) */
+ u32 execution_quantum;
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset. (in micro seconds).
+ */
+ u32 preemption_timeout;
+ u32 policy_flags; /* CONTEXT_POLICY_* */
+ u32 reserved1[19];
+} __packed;
+
+/* 32-bit KLV structure as used by policy updates and others */
+struct guc_klv_generic_dw_t {
+ u32 kl;
+ u32 value;
+} __packed;
+
+/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
+struct guc_update_context_policy_header {
+ u32 action;
+ u32 ctx_id;
+} __packed;
+
+struct guc_update_context_policy {
+ struct guc_update_context_policy_header header;
+ struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
+} __packed;
+
+#define GUC_POWER_UNSPECIFIED 0
+#define GUC_POWER_D0 1
+#define GUC_POWER_D1 2
+#define GUC_POWER_D2 3
+#define GUC_POWER_D3 4
+
+/* Scheduling policy settings */
+
+#define GLOBAL_POLICY_MAX_NUM_WI 15
+
+/* Don't reset an engine upon preemption failure */
+#define GLOBAL_POLICY_DISABLE_ENGINE_RESET BIT(0)
+
+#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+
+/*
+ * GuC converts the timeout to clock ticks internally. Different platforms have
+ * different GuC clocks. Thus, the maximum value before overflow is platform
+ * dependent. Current worst case scenario is about 110s. So, the spec says to
+ * limit to 100s to be safe.
+ */
+#define GUC_POLICY_MAX_EXEC_QUANTUM_US (100 * 1000 * 1000UL)
+#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US (100 * 1000 * 1000UL)
+
+static inline u32 guc_policy_max_exec_quantum_ms(void)
+{
+ BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
+ return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
+}
+
+static inline u32 guc_policy_max_preempt_timeout_ms(void)
+{
+ BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
+ return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
+}
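+
+/*
+ * Worked example: GUC_POLICY_MAX_EXEC_QUANTUM_US is 100 * 1000 * 1000 us,
+ * i.e. 100,000,000 us, which comfortably fits in a u32 (the BUILD_BUG_ONs
+ * above guard this), so the helpers return 100,000 ms == 100 s.
+ */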
+
+struct guc_policies {
+ u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
+ /* In micro seconds. How much time to allow before DPC processing is
+ * called back via interrupt (to prevent DPC queue drain starving).
+ * Typically 1000s of micro seconds (example only, not granularity). */
+ u32 dpc_promote_time;
+
+ /* Must be set to take these new values. */
+ u32 is_valid;
+
+ /* Max number of WIs to process per call. A large value may keep CS
+ * idle. */
+ u32 max_num_work_items;
+
+ u32 global_flags;
+ u32 reserved[4];
+} __packed;
+
+/* GuC MMIO reg state struct */
+struct guc_mmio_reg {
+ u32 offset;
+ u32 value;
+ u32 flags;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_NEEDS_STEERING BIT(1)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
+#define GUC_REGSET_STEERING_GROUP GENMASK(15, 12)
+#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20)
+ u32 mask;
+} __packed;
+
+/* GuC register sets */
+struct guc_mmio_reg_set {
+ u32 address;
+ u16 count;
+ u16 reserved;
+} __packed;
+
+/* HW info */
+struct guc_gt_system_info {
+ u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES];
+ u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
+} __packed;
+
+enum {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+ GUC_CAPTURE_LIST_INDEX_MAX = 2,
+};
+
+/* Register types of GuC capture register lists */
+enum guc_capture_type {
+ GUC_CAPTURE_LIST_TYPE_GLOBAL = 0,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ GUC_CAPTURE_LIST_TYPE_MAX,
+};
+
+/* GuC Additional Data Struct */
+struct guc_ads {
+ struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 reserved0;
+ u32 scheduler_policies;
+ u32 gt_system_info;
+ u32 reserved1;
+ u32 control_data;
+ u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
+ u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
+ u32 private_data;
+ u32 reserved2;
+ u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
+ u32 reserved[14];
+} __packed;
+
+/* Engine usage stats */
+struct guc_engine_usage_record {
+ u32 current_context_index;
+ u32 last_switch_in_stamp;
+ u32 reserved0;
+ u32 total_runtime;
+ u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+ struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
+/* GuC logging structures */
+
+enum guc_log_buffer_type {
+ GUC_DEBUG_LOG_BUFFER,
+ GUC_CRASH_DUMP_LOG_BUFFER,
+ GUC_CAPTURE_LOG_BUFFER,
+ GUC_MAX_LOG_BUFFER
+};
+
+/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
+ * This state structure is used to coordinate the retrieval of GuC firmware
+ * logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location in the log buffer that i915 read last and
+ * is read-only for the GuC firmware. write_ptr is incremented by the GuC by
+ * the number of bytes written for each log entry and is read-only for i915.
+ * When any log buffer type becomes half full, the GuC sends a flush interrupt.
+ * While the GuC is writing to the second half of the buffer, it expects the
+ * first half to be consumed by the Host, followed by a flush-completed
+ * acknowledgment from the Host, so that it never overwrites unread data and
+ * loses logs. So when the buffer gets half full and i915 has requested the
+ * interrupt, the GuC sets the flush_to_file field, sets sampled_write_ptr to
+ * the value of write_ptr and raises the interrupt.
+ * On receiving the interrupt, i915 should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of sampled_write_ptr
+ * before sending an acknowledgment to the GuC. The marker and version fields
+ * are for internal use by the GuC and opaque to i915. The buffer_full_cnt
+ * field is incremented every time the GuC detects a log buffer overflow.
+ */
+struct guc_log_buffer_state {
+ u32 marker[2];
+ u32 read_ptr;
+ u32 write_ptr;
+ u32 size;
+ u32 sampled_write_ptr;
+ u32 wrap_offset;
+ union {
+ struct {
+ u32 flush_to_file:1;
+ u32 buffer_full_cnt:4;
+ u32 reserved:27;
+ };
+ u32 flags;
+ };
+ u32 version;
+} __packed;
+
+struct guc_ctx_report {
+ u32 report_return_status;
+ u32 reserved1[64];
+ u32 affected_count;
+ u32 reserved2[2];
+} __packed;
+
+/* GuC Shared Context Data Struct */
+struct guc_shared_ctx_data {
+ u32 addr_of_last_preempted_data_low;
+ u32 addr_of_last_preempted_data_high;
+ u32 addr_of_last_preempted_data_high_tmp;
+ u32 padding;
+ u32 is_mapped_to_proxy;
+ u32 proxy_ctx_id;
+ u32 engine_reset_ctx_id;
+ u32 media_reset_count;
+ u32 reserved1[8];
+ u32 uk_last_ctx_switch_reason;
+ u32 was_reset;
+ u32 lrca_gpu_addr;
+ u64 execlist_ctx;
+ u32 reserved2[66];
+ struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
+} __packed;
+
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum intel_guc_recv_message {
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
+ INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
+};
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
new file mode 100644
index 000000000..4781fccc2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_hwconfig.h"
+#include "i915_drv.h"
+#include "i915_memcpy.h"
+
+/*
+ * GuC has a blob containing hardware configuration information (HWConfig).
+ * This is formatted as a simple and flexible KLV (Key/Length/Value) table.
+ *
+ * For example, a minimal version could be:
+ * enum device_attr {
+ * ATTR_SOME_VALUE = 0,
+ * ATTR_SOME_MASK = 1,
+ * };
+ *
+ * static const u32 hwconfig[] = {
+ * ATTR_SOME_VALUE,
+ * 1, // Value Length in DWords
+ * 8, // Value
+ *
+ * ATTR_SOME_MASK,
+ * 3,
+ * 0x00FFFFFFFF, 0xFFFFFFFF, 0xFF000000,
+ * };
+ *
+ * The attribute ids are defined in a hardware spec.
+ */
+
+static int __guc_action_get_hwconfig(struct intel_guc *guc,
+ u32 ggtt_offset, u32 ggtt_size)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_GET_HWCONFIG,
+ lower_32_bits(ggtt_offset),
+ upper_32_bits(ggtt_offset),
+ ggtt_size,
+ };
+ int ret;
+
+ ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+ if (ret == -ENXIO)
+ return -ENOENT;
+
+ return ret;
+}
+
+static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ int ret;
+
+ /*
+ * Sending a query with zero offset and size will return the
+ * size of the blob.
+ */
+ ret = __guc_action_get_hwconfig(guc, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return -EINVAL;
+
+ hwconfig->size = ret;
+ return 0;
+}
+
+static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ struct i915_vma *vma;
+ u32 ggtt_offset;
+ void *vaddr;
+ int ret;
+
+ GEM_BUG_ON(!hwconfig->size);
+
+ ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
+ if (ret)
+ return ret;
+
+ ggtt_offset = intel_guc_ggtt_offset(guc, vma);
+
+ ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size);
+ if (ret >= 0)
+ memcpy(hwconfig->ptr, vaddr, hwconfig->size);
+
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+
+ return ret;
+}
+
+static bool has_table(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
+ return true;
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
+ return true;
+
+ return false;
+}
+
+/**
+ * guc_hwconfig_init - Initialize the HWConfig
+ *
+ * Retrieve the HWConfig table from the GuC and save it locally.
+ * It can then be queried on demand by other users later on.
+ */
+static int guc_hwconfig_init(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret;
+
+ if (!has_table(gt->i915))
+ return 0;
+
+ ret = guc_hwconfig_discover_size(guc, hwconfig);
+ if (ret)
+ return ret;
+
+ hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL);
+ if (!hwconfig->ptr) {
+ hwconfig->size = 0;
+ return -ENOMEM;
+ }
+
+ ret = guc_hwconfig_fill_buffer(guc, hwconfig);
+ if (ret < 0) {
+ intel_gt_fini_hwconfig(gt);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gt_init_hwconfig - Initialize the HWConfig if available
+ *
+ * Retrieve the HWConfig table if available on the current platform.
+ */
+int intel_gt_init_hwconfig(struct intel_gt *gt)
+{
+ if (!intel_uc_uses_guc(&gt->uc))
+ return 0;
+
+ return guc_hwconfig_init(gt);
+}
+
+/**
+ * intel_gt_fini_hwconfig - Finalize the HWConfig
+ *
+ * Free up the memory allocation holding the table.
+ */
+void intel_gt_fini_hwconfig(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+
+ kfree(hwconfig->ptr);
+ hwconfig->size = 0;
+ hwconfig->ptr = NULL;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
new file mode 100644
index 000000000..68331c538
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
+#include "gt/intel_gt.h"
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_log.h"
+
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+#else
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
+#endif
+
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+
+struct guc_log_section {
+ u32 max;
+ u32 flag;
+ u32 default_val;
+ const char *name;
+};
+
+static void _guc_log_init_sizes(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
+ {
+ GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
+ "crash dump"
+ },
+ {
+ GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
+ "debug",
+ },
+ {
+ GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
+ GUC_LOG_CAPTURE_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
+ "capture",
+ }
+ };
+ int i;
+
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
+ log->sizes[i].bytes = sections[i].default_val;
+
+ /* If debug size > 1MB then bump default crash size to keep the same units */
+ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
+ log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;
+
+ /* Prepare the GuC API structure fields: */
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
+ /* Convert to correct units */
+ if ((log->sizes[i].bytes % SZ_1M) == 0) {
+ log->sizes[i].units = SZ_1M;
+ log->sizes[i].flag = sections[i].flag;
+ } else {
+ log->sizes[i].units = SZ_4K;
+ log->sizes[i].flag = 0;
+ }
+
+ if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
+ drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
+ log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
+
+ if (!log->sizes[i].count) {
+ drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ } else {
+ /* Size is +1 unit */
+ log->sizes[i].count--;
+ }
+
+ /* Clip to field size */
+ if (log->sizes[i].count > sections[i].max) {
+ drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
+ log->sizes[i].count = sections[i].max;
+ }
+ }
+
+ if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
+ drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units,
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
+ log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
+ }
+
+ log->sizes_initialised = true;
+}
+
+static void guc_log_init_sizes(struct intel_guc_log *log)
+{
+ if (log->sizes_initialised)
+ return;
+
+ _guc_log_init_sizes(log);
+}
+
+static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
+}
+
+static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
+}
+
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
+}
+
+static u32 intel_guc_log_size(struct intel_guc_log *log)
+{
+ /*
+ * GuC Log buffer Layout:
+ *
+ * NB: Ordering must follow "enum guc_log_buffer_type".
+ *
+ * +===============================+ 00B
+ * | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
+ * +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Debug logs |
+ * +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
+ */
+ return PAGE_SIZE +
+ intel_guc_log_section_size_crash(log) +
+ intel_guc_log_section_size_debug(log) +
+ intel_guc_log_section_size_capture(log);
+}
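+
+/*
+ * Worked example (assuming 4 KiB pages and the default non-debug Kconfig
+ * sizes defined at the top of this file): 4 KiB of state headers + 8 KiB
+ * crash dump + 64 KiB debug + 1 MiB capture, i.e. a 1100 KiB log buffer.
+ */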
+
+/**
+ * DOC: GuC firmware log
+ *
+ * The firmware log is enabled by setting i915.guc_log_level to a positive level.
+ * Log data is printed out by reading the debugfs file i915_guc_log_dump. Reading
+ * from i915_guc_load_status will print out the firmware loading status and
+ * scratch register values.
+ */
+
+static int guc_action_flush_log_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_DEBUG_LOG_BUFFER
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
+static int guc_action_flush_log(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
+ 0
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_action_control_log(struct intel_guc *guc, bool enable,
+ bool default_logging, u32 verbosity)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
+ (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
+ (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
+ (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
+ };
+
+ GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /*
+ * Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data, as the Producer could be writing to the
+ * same sub buffer from which the Consumer is reading. This can't be avoided
+ * unless Consumer is fast enough and can always run in tandem with
+ * Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /*
+ * This is to enable the use of a single buffer for the relay channel and,
+ * correspondingly, a single file exposed to userspace, through which it can
+ * collect the logs in order without any post-processing.
+ * 'is_global' needs to be set even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ buf_file = debugfs_create_file(filename, mode,
+ parent, buf, &relay_file_operations);
+ if (IS_ERR(buf_file))
+ return NULL;
+
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static const struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_move_to_next_buf(struct intel_guc_log *log)
+{
+ /*
+ * Make sure the updates made in the sub buffer are visible when
+ * Consumer sees the following update to offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(log->relay.channel, log->vma->obj->base.size -
+ intel_guc_log_section_size_capture(log));
+
+ /* Switch to the next sub buffer */
+ relay_flush(log->relay.channel);
+}
+
+static void *guc_get_write_buffer(struct intel_guc_log *log)
+{
+ /*
+ * Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. We could have used relay_write() to copy the data
+ * indirectly, but that would have been a bit convoluted, as we need to
+ * write only to certain locations inside a sub buffer, which cannot be
+ * done without using relay_reserve() along with relay_write(). So it's
+ * better to use relay_reserve() alone.
+ */
+ return relay_reserve(log->relay.channel, 0);
+}
+
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ log->stats[type].overflow = full_cnt;
+ log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ log->stats[type].sampled_overflow += 16;
+ }
+
+ dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
+ "GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
+
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_DEBUG_LOG_BUFFER:
+ return intel_guc_log_section_size_debug(log);
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return intel_guc_log_section_size_crash(log);
+ case GUC_CAPTURE_LOG_BUFFER:
+ return intel_guc_log_section_size_capture(log);
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
+{
+ enum guc_log_buffer_type i;
+ size_t offset = PAGE_SIZE;/* for the log_buffer_states */
+
+ for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
+ if (i == type)
+ break;
+ offset += intel_guc_get_log_buffer_size(log, i);
+ }
+
+ return offset;
+}
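+
+/*
+ * Worked example, assuming 4 KiB pages and the default non-debug Kconfig
+ * sizes (8 KiB crash, 64 KiB debug): the debug section starts at 4 KiB
+ * (right after the state headers), the crash dump section at 4 KiB + 64 KiB
+ * = 68 KiB, and the capture section at 68 KiB + 8 KiB = 76 KiB, matching the
+ * layout sketch in intel_guc_log_size().
+ */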
+
+static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ mutex_lock(&log->relay.lock);
+
+ if (WARN_ON(!intel_guc_log_relay_created(log)))
+ goto out_unlock;
+
+ /* Get the pointer to shared GuC log buffer */
+ src_data = log->buf_addr;
+ log_buf_state = src_data;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
+
+ if (unlikely(!log_buf_snapshot_state)) {
+ /*
+ * Use a rate-limited error to avoid a deluge of messages; logs might
+ * be getting consumed by userspace at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
+ log->relay.full_count++;
+
+ goto out_unlock;
+ }
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ /* For relay logging, we exclude error state capture */
+ for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
+ /*
+ * Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = intel_guc_get_log_buffer_size(log, type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ log->stats[type].flush += log_buf_state_local.flush_to_file;
+ new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /*
+ * The write pointer could have been updated by the GuC firmware
+ * after it sent the flush interrupt to the Host. For consistency,
+ * set the write pointer in the snapshot buffer to the value of
+ * sampled_write_ptr.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ guc_move_to_next_buf(log);
+
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+}
+
+static void copy_debug_logs_work(struct work_struct *work)
+{
+ struct intel_guc_log *log =
+ container_of(work, struct intel_guc_log, relay.flush_work);
+
+ guc_log_copy_debuglogs_for_relay(log);
+}
+
+static int guc_log_relay_map(struct intel_guc_log *log)
+{
+ lockdep_assert_held(&log->relay.lock);
+
+ if (!log->vma || !log->buf_addr)
+ return -ENODEV;
+
+ /*
+ * The WC vmalloc mapping of the log buffer pages was done at
+ * GuC log init time, but let's keep a ref for book-keeping.
+ */
+ i915_gem_object_get(log->vma->obj);
+ log->relay.buf_in_use = true;
+
+ return 0;
+}
+
+static void guc_log_relay_unmap(struct intel_guc_log *log)
+{
+ lockdep_assert_held(&log->relay.lock);
+
+ i915_gem_object_put(log->vma->obj);
+ log->relay.buf_in_use = false;
+}
+
+void intel_guc_log_init_early(struct intel_guc_log *log)
+{
+ mutex_init(&log->relay.lock);
+ INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
+ log->relay.started = false;
+}
+
+static int guc_log_relay_create(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+ int ret;
+
+ lockdep_assert_held(&log->relay.lock);
+ GEM_BUG_ON(!log->vma);
+
+ /*
+ * Keep the size of sub buffers the same as the shared log buffer,
+ * minus the error-state-capture section, which is not relayed
+ */
+ subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);
+
+ /*
+ * Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and gives userspace enough leeway, in terms of
+ * latency, to consume the logs from relay. It also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open("guc_log",
+ dev_priv->drm.primary->debugfs_root,
+ subbuf_size, n_subbufs,
+ &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ log->relay.channel = guc_log_relay_chan;
+
+ return 0;
+}
+
+static void guc_log_relay_destroy(struct intel_guc_log *log)
+{
+ lockdep_assert_held(&log->relay.lock);
+
+ relay_close(log->relay.channel);
+ log->relay.channel = NULL;
+}
+
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ intel_wakeref_t wakeref;
+
+ _guc_log_copy_debuglogs_for_relay(log);
+
+ /*
+ * Generally device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ guc_action_flush_log_complete(guc);
+}
+
+static u32 __get_default_log_level(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ /* A negative value means "use platform/config default" */
+ if (i915->params.guc_log_level < 0) {
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
+ }
+
+ if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915->params.guc_log_level,
+ "verbosity too high");
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
+ }
+
+ GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915->params.guc_log_level;
+}
+
+int intel_guc_log_create(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct i915_vma *vma;
+ void *vaddr;
+ u32 guc_log_size;
+ int ret;
+
+ GEM_BUG_ON(log->vma);
+
+ guc_log_size = intel_guc_log_size(log);
+
+ vma = intel_guc_allocate_vma(guc, guc_log_size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ log->vma = vma;
+ /*
+ * Create a WC (uncached for read) vmalloc mapping up front for immediate
+ * access to data from memory during critical events such as error capture.
+ */
+ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ i915_vma_unpin_and_release(&log->vma, 0);
+ goto err;
+ }
+ log->buf_addr = vaddr;
+
+ log->level = __get_default_log_level(log);
+ DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, str_enabled_disabled(log->level),
+ str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
+
+ return 0;
+
+err:
+ DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
+ return ret;
+}
+
+void intel_guc_log_destroy(struct intel_guc_log *log)
+{
+ log->buf_addr = NULL;
+ i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
+}
+
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
+ GEM_BUG_ON(!log->vma);
+
+ /*
+ * GuC recognizes log levels from 0 to max; we use 0 to indicate that
+ * logging should be disabled.
+ */
+ if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ if (log->level == level)
+ goto out_unlock;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ ret = guc_action_control_log(guc,
+ GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
+ if (ret) {
+ DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
+ goto out_unlock;
+ }
+
+ log->level = level;
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return ret;
+}
+
+bool intel_guc_log_relay_created(const struct intel_guc_log *log)
+{
+ return log->buf_addr;
+}
+
+int intel_guc_log_relay_open(struct intel_guc_log *log)
+{
+ int ret;
+
+ if (!log->vma)
+ return -ENODEV;
+
+ mutex_lock(&log->relay.lock);
+
+ if (intel_guc_log_relay_created(log)) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ /*
+ * We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (!i915_has_memcpy_from_wc()) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+
+ ret = guc_log_relay_create(log);
+ if (ret)
+ goto out_unlock;
+
+ ret = guc_log_relay_map(log);
+ if (ret)
+ goto out_relay;
+
+ mutex_unlock(&log->relay.lock);
+
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
+}
+
+int intel_guc_log_relay_start(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ return -EEXIST;
+
+ /*
+ * When GuC is logging without us relaying to userspace, we're ignoring
+ * the flush notification. This means that we need to unconditionally
+ * flush on relay enabling, since GuC only notifies us once.
+ */
+ queue_work(system_highpri_wq, &log->relay.flush_work);
+
+ log->relay.started = true;
+
+ return 0;
+}
+
+void intel_guc_log_relay_flush(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ intel_wakeref_t wakeref;
+
+ if (!log->relay.started)
+ return;
+
+ /*
+ * Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete, otherwise the forceful flush may not actually happen.
+ */
+ flush_work(&log->relay.flush_work);
+
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
+ guc_action_flush_log(guc);
+
+ /* GuC would have updated log buffer by now, so copy it */
+ guc_log_copy_debuglogs_for_relay(log);
+}
+
+/*
+ * Stops the relay log. Called from intel_guc_log_relay_close(), so no
+ * possibility of race with start/flush since relay_write cannot race
+ * relay_close.
+ */
+static void guc_log_relay_stop(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (!log->relay.started)
+ return;
+
+ intel_synchronize_irq(i915);
+
+ flush_work(&log->relay.flush_work);
+
+ log->relay.started = false;
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_relay_stop(log);
+
+ mutex_lock(&log->relay.lock);
+ GEM_BUG_ON(!intel_guc_log_relay_created(log));
+ guc_log_relay_unmap(log);
+ guc_log_relay_destroy(log);
+ mutex_unlock(&log->relay.lock);
+}
+
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ queue_work(system_highpri_wq, &log->relay.flush_work);
+}
+
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_DEBUG_LOG_BUFFER:
+ return "DEBUG";
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return "CRASH";
+ case GUC_CAPTURE_LOG_BUFFER:
+ return "CAPTURE";
+ default:
+ MISSING_CASE(type);
+ }
+
+ return "";
+}
+
+/**
+ * intel_guc_log_info - dump information about GuC log relay
+ * @log: the GuC log
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC log info
+ */
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
+{
+ enum guc_log_buffer_type type;
+
+ if (!intel_guc_log_relay_created(log)) {
+ drm_puts(p, "GuC log relay not created\n");
+ return;
+ }
+
+ drm_puts(p, "GuC logging stats:\n");
+
+ drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
+
+ for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
+ stringify_guc_log_type(type),
+ log->stats[type].flush,
+ log->stats[type].sampled_overflow);
+ }
+}
+
+/**
+ * intel_guc_log_dump - dump the contents of the GuC log
+ * @log: the GuC log
+ * @p: the &drm_printer
+ * @dump_load_err: dump the log saved on GuC load error
+ *
+ * Pretty printer for the GuC log
+ */
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+ struct drm_i915_gem_object *obj = NULL;
+ void *map;
+ u32 *page;
+ int i, j;
+
+ if (!intel_guc_is_supported(guc))
+ return -ENODEV;
+
+ if (dump_load_err)
+ obj = uc->load_err_log;
+ else if (guc->log.vma)
+ obj = guc->log.vma->obj;
+
+ if (!obj)
+ return 0;
+
+ page = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ intel_guc_dump_time_info(guc, p);
+
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ DRM_DEBUG("Failed to pin object\n");
+ drm_puts(p, "(log data unaccessible)\n");
+ free_page((unsigned long)page);
+ return PTR_ERR(map);
+ }
+
+ for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
+ if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
+ memcpy(page, map + i, PAGE_SIZE);
+
+ for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(page + j + 0), *(page + j + 1),
+ *(page + j + 2), *(page + j + 3));
+ }
+
+ drm_puts(p, "\n");
+
+ i915_gem_object_unpin_map(obj);
+ free_page((unsigned long)page);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
new file mode 100644
index 000000000..02127703b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_LOG_H_
+#define _INTEL_GUC_LOG_H_
+
+#include <linux/mutex.h>
+#include <linux/relay.h>
+#include <linux/workqueue.h>
+
+#include "intel_guc_fwif.h"
+#include "i915_gem.h"
+
+struct intel_guc;
+
+/*
+ * While we're using a plain log level in i915, GuC controls are much more...
+ * "elaborate"? We have a couple of bits for verbosity, a separate bit for actual
+ * log enabling, and a separate bit for default logging - which "conveniently"
+ * ignores the enable bit.
+ */
+#define GUC_LOG_LEVEL_DISABLED 0
+#define GUC_LOG_LEVEL_NON_VERBOSE 1
+#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED)
+#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE)
+#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \
+ typeof(x) _x = (x); \
+ GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \
+})
+#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
+#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
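+
+/*
+ * For illustration, the mapping defined by the macros above is:
+ * level 0 = disabled, level 1 = enabled but non-verbose, levels 2..5 =
+ * verbose with GuC verbosity 0..3; GUC_LOG_LEVEL_MAX therefore evaluates to 5.
+ */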
+
+enum {
+ GUC_LOG_SECTIONS_CRASH,
+ GUC_LOG_SECTIONS_DEBUG,
+ GUC_LOG_SECTIONS_CAPTURE,
+ GUC_LOG_SECTIONS_LIMIT
+};
+
+struct intel_guc_log {
+ u32 level;
+
+ /* Allocation settings */
+ struct {
+ s32 bytes; /* Size in bytes */
+ s32 units; /* GuC API units - 1MB or 4KB */
+ s32 count; /* Number of API units */
+ u32 flag; /* GuC API units flag */
+ } sizes[GUC_LOG_SECTIONS_LIMIT];
+ bool sizes_initialised;
+
+ /* Combined buffer allocation */
+ struct i915_vma *vma;
+ void *buf_addr;
+
+ /* RelayFS support */
+ struct {
+ bool buf_in_use;
+ bool started;
+ struct work_struct flush_work;
+ struct rchan *channel;
+ struct mutex lock;
+ u32 full_count;
+ } relay;
+
+ /* logging related stats */
+ struct {
+ u32 sampled_overflow;
+ u32 overflow;
+ u32 flush;
+ } stats[GUC_MAX_LOG_BUFFER];
+};
+
+void intel_guc_log_init_early(struct intel_guc_log *log);
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
+ unsigned int full_cnt);
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log, enum guc_log_buffer_type type);
+int intel_guc_log_create(struct intel_guc_log *log);
+void intel_guc_log_destroy(struct intel_guc_log *log);
+
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
+bool intel_guc_log_relay_created(const struct intel_guc_log *log);
+int intel_guc_log_relay_open(struct intel_guc_log *log);
+int intel_guc_log_relay_start(struct intel_guc_log *log);
+void intel_guc_log_relay_flush(struct intel_guc_log *log);
+void intel_guc_log_relay_close(struct intel_guc_log *log);
+
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
+
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+ return log->level;
+}
+
+void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
+int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
+ bool dump_load_err);
+
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
new file mode 100644
index 000000000..ddfbe3346
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/fs.h>
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "intel_guc.h"
+#include "intel_guc_log.h"
+#include "intel_guc_log_debugfs.h"
+#include "intel_uc.h"
+
+static u32 obj_to_guc_log_dump_size(struct drm_i915_gem_object *obj)
+{
+ u32 size;
+
+ if (!obj)
+ return PAGE_SIZE;
+
+ /* "0x%08x 0x%08x 0x%08x 0x%08x\n" => 16 bytes -> 44 chars => x2.75 */
+ size = ((obj->base.size * 11) + 3) / 4;
+
+ /* Add padding for final blank line, any extra header info, etc. */
+ size = PAGE_ALIGN(size + PAGE_SIZE);
+
+ return size;
+}
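+
+/*
+ * Worked example of the sizing above: each 16 bytes of log expand to a
+ * 44-character line, i.e. a factor of 11/4, so a 64 KiB log object needs
+ * about 176 KiB of text plus one page of padding, roughly 180 KiB with
+ * 4 KiB pages.
+ */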
+
+static u32 guc_log_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ if (!log->vma)
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(log->vma->obj);
+}
+
+static int guc_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ int ret;
+
+ ret = intel_guc_log_dump(m->private, &p, false);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
+ return ret;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_log_dump, guc_log_dump_size);
+
+static u32 guc_load_err_dump_size(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+
+ if (!intel_guc_is_supported(guc))
+ return PAGE_SIZE;
+
+ return obj_to_guc_log_dump_size(uc->load_err_log);
+}
+
+static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
+ pr_warn_once("preallocated size:%zx for %s exceeded\n",
+ m->size, __func__);
+
+ return intel_guc_log_dump(m->private, &p, true);
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_load_err_log_dump, guc_load_err_dump_size);
+
+static int guc_log_level_get(void *data, u64 *val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!log->vma)
+ return -ENODEV;
+
+ *val = intel_guc_log_get_level(log);
+
+ return 0;
+}
+
+static int guc_log_level_set(void *data, u64 val)
+{
+ struct intel_guc_log *log = data;
+
+ if (!log->vma)
+ return -ENODEV;
+
+ return intel_guc_log_set_level(log, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
+ guc_log_level_get, guc_log_level_set,
+ "%lld\n");
+
+static int guc_log_relay_open(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ if (!intel_guc_is_ready(log_to_guc(log)))
+ return -ENODEV;
+
+ file->private_data = log;
+
+ return intel_guc_log_relay_open(log);
+}
+
+static ssize_t
+guc_log_relay_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Enable and start the GuC log relay on a value of 1.
+ * Flush the log relay for any other value.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
+}
+
+static int guc_log_relay_release(struct inode *inode, struct file *file)
+{
+ struct intel_guc_log *log = inode->i_private;
+
+ intel_guc_log_relay_close(log);
+ return 0;
+}
+
+static const struct file_operations guc_log_relay_fops = {
+ .owner = THIS_MODULE,
+ .open = guc_log_relay_open,
+ .write = guc_log_relay_write,
+ .release = guc_log_relay_release,
+};
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "guc_log_dump", &guc_log_dump_fops, NULL },
+ { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
+ { "guc_log_level", &guc_log_level_fops, NULL },
+ { "guc_log_relay", &guc_log_relay_fops, NULL },
+ };
+
+ if (!intel_guc_is_supported(log_to_guc(log)))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
new file mode 100644
index 000000000..e8900e3d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_LOG_H
+#define DEBUGFS_GUC_LOG_H
+
+struct intel_guc_log;
+struct dentry;
+
+void intel_guc_log_debugfs_register(struct intel_guc_log *log,
+ struct dentry *root);
+
+#endif /* DEBUGFS_GUC_LOG_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
new file mode 100644
index 000000000..8f8dd0583
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "intel_guc_rc.h"
+#include "gt/intel_gt.h"
+#include "i915_drv.h"
+
+static bool __guc_rc_supported(struct intel_guc *guc)
+{
+ /* GuC RC is unavailable for pre-Gen12 */
+ return guc->submission_supported &&
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+}
+
+static bool __guc_rc_selected(struct intel_guc *guc)
+{
+ if (!intel_guc_rc_is_supported(guc))
+ return false;
+
+ return guc->submission_selected;
+}
+
+void intel_guc_rc_init_early(struct intel_guc *guc)
+{
+ guc->rc_supported = __guc_rc_supported(guc);
+ guc->rc_selected = __guc_rc_selected(guc);
+}
+
+static int guc_action_control_gucrc(struct intel_guc *guc, bool enable)
+{
+ u32 rc_mode = enable ? INTEL_GUCRC_FIRMWARE_CONTROL :
+ INTEL_GUCRC_HOST_CONTROL;
+ u32 action[] = {
+ INTEL_GUC_ACTION_SETUP_PC_GUCRC,
+ rc_mode
+ };
+ int ret;
+
+ ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
+ ret = ret > 0 ? -EPROTO : ret;
+
+ return ret;
+}
+
+static int __guc_rc_control(struct intel_guc *guc, bool enable)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ if (!intel_uc_uses_guc_rc(&gt->uc))
+ return -EOPNOTSUPP;
+
+ if (!intel_guc_is_ready(guc))
+ return -EINVAL;
+
+ ret = guc_action_control_gucrc(guc, enable);
+ if (ret) {
+ i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ return ret;
+ }
+
+ drm_info(&gt->i915->drm, "GuC RC: %s\n",
+ str_enabled_disabled(enable));
+
+ return 0;
+}
+
+int intel_guc_rc_enable(struct intel_guc *guc)
+{
+ return __guc_rc_control(guc, true);
+}
+
+int intel_guc_rc_disable(struct intel_guc *guc)
+{
+ return __guc_rc_control(guc, false);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h
new file mode 100644
index 000000000..57e86c337
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_RC_H_
+#define _INTEL_GUC_RC_H_
+
+#include "intel_guc_submission.h"
+
+void intel_guc_rc_init_early(struct intel_guc *guc);
+
+static inline bool intel_guc_rc_is_supported(struct intel_guc *guc)
+{
+ return guc->rc_supported;
+}
+
+static inline bool intel_guc_rc_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected && intel_guc_rc_is_supported(guc);
+}
+
+static inline bool intel_guc_rc_is_used(struct intel_guc *guc)
+{
+ return intel_guc_submission_is_used(guc) && intel_guc_rc_is_wanted(guc);
+}
+
+int intel_guc_rc_enable(struct intel_guc *guc);
+int intel_guc_rc_disable(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
new file mode 100644
index 000000000..a7092f711
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_REG_H_
+#define _INTEL_GUC_REG_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS _MMIO(0xc000)
+#define GS_RESET_SHIFT 0
+#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
+#define GS_BOOTROM_SHIFT 1
+#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
+#define GS_UKERNEL_SHIFT 8
+#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
+#define GS_MIA_SHIFT 16
+#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
+#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
+#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
+#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
+#define GS_AUTH_STATUS_SHIFT 30
+#define GS_AUTH_STATUS_MASK (0x03U << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
+
+#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT 16
+
+#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4)
+#define GEN11_SOFT_SCRATCH_COUNT 4
+
+#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
+#define UOS_RSA_SCRATCH_COUNT 64
+
+#define DMA_ADDR_0_LOW _MMIO(0xc300)
+#define DMA_ADDR_0_HIGH _MMIO(0xc304)
+#define DMA_ADDR_1_LOW _MMIO(0xc308)
+#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
+#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
+#define DMA_ADDRESS_SPACE_GTT (8 << 16)
+#define DMA_COPY_SIZE _MMIO(0xc310)
+#define DMA_CTRL _MMIO(0xc314)
+#define HUC_UKERNEL (1<<9)
+#define UOS_MOVE (1<<4)
+#define START_DMA (1<<0)
+#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
+#define GUC_WOPCM_OFFSET_VALID (1<<0)
+#define HUC_LOADING_AGENT_VCR (0<<1)
+#define HUC_LOADING_AGENT_GUC (1<<1)
+#define GUC_WOPCM_OFFSET_SHIFT 14
+#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT)
+#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+
+#define HUC_STATUS2 _MMIO(0xD3B0)
+#define HUC_FW_VERIFIED (1<<7)
+
+#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC)
+#define HUC_LOAD_SUCCESSFUL (1 << 0)
+
+#define GUC_WOPCM_SIZE _MMIO(0xc050)
+#define GUC_WOPCM_SIZE_LOCKED (1<<0)
+#define GUC_WOPCM_SIZE_SHIFT 12
+#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT)
+
+#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
+#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
+#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
+#define GT_DOORBELL_ENABLE (1<<0)
+
+#define GEN8_GTCR _MMIO(0x4274)
+#define GEN8_GTCR_INVALIDATE (1<<0)
+
+#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8)
+#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0)
+
+#define GUC_ARAT_C6DIS _MMIO(0xA178)
+
+#define GUC_SHIM_CONTROL _MMIO(0xc064)
+#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
+#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
+#define GUC_ENABLE_MIA_CACHING (1<<2)
+#define GUC_GEN10_MSGCH_ENABLE (1<<4)
+#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9)
+#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10)
+#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
+#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
+
+#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_IS_PRIVILEGED (1<<29)
+#define GSC_LOADS_HUC (1<<30)
+
+#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
+#define GUC_SEND_TRIGGER (1<<0)
+#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
+
+#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
+#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
+#define GUC_SEM_INTR_ENABLE_ALL (0xff)
+
+#define GUC_NUM_DOORBELLS 256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+ u32 db_status;
+#define GUC_DOORBELL_DISABLED 0
+#define GUC_DOORBELL_ENABLED 1
+
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
+#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
+#define GEN8_DRB_VALID (1<<0)
+#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
+
+#define GEN12_DIST_DBS_POPULATED _MMIO(0xd08)
+#define GEN12_DOORBELLS_PER_SQIDI_SHIFT 16
+#define GEN12_DOORBELLS_PER_SQIDI (0xff)
+#define GEN12_SQIDIS_DOORBELL_EXIST (0xffff)
+
+#define DE_GUCRMR _MMIO(0x44054)
+
+#define GUC_BCS_RCS_IER _MMIO(0xC550)
+#define GUC_VCS2_VCS1_IER _MMIO(0xC554)
+#define GUC_WD_VECS_IER _MMIO(0xC558)
+#define GUC_PM_P24C_IER _MMIO(0xC55C)
+
+/* GuC Interrupt Vector */
+#define GUC_INTR_GUC2HOST BIT(15)
+#define GUC_INTR_EXEC_ERROR BIT(14)
+#define GUC_INTR_DISPLAY_EVENT BIT(13)
+#define GUC_INTR_SEM_SIG BIT(12)
+#define GUC_INTR_IOMMU2GUC BIT(11)
+#define GUC_INTR_DOORBELL_RANG BIT(10)
+#define GUC_INTR_DMA_DONE BIT(9)
+#define GUC_INTR_FATAL_ERROR BIT(8)
+#define GUC_INTR_NOTIF_ERROR BIT(7)
+#define GUC_INTR_SW_INT_6 BIT(6)
+#define GUC_INTR_SW_INT_5 BIT(5)
+#define GUC_INTR_SW_INT_4 BIT(4)
+#define GUC_INTR_SW_INT_3 BIT(3)
+#define GUC_INTR_SW_INT_2 BIT(2)
+#define GUC_INTR_SW_INT_1 BIT(1)
+#define GUC_INTR_SW_INT_0 BIT(0)
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
new file mode 100644
index 000000000..72ba1c758
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_cache.h>
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_guc_slpc.h"
+#include "intel_mchbar_regs.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rps.h"
+
+static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
+{
+ return container_of(slpc, struct intel_guc, slpc);
+}
+
+static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
+{
+ return guc_to_gt(slpc_to_guc(slpc));
+}
+
+static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
+{
+ return slpc_to_gt(slpc)->i915;
+}
+
+static bool __detect_slpc_supported(struct intel_guc *guc)
+{
+ /* GuC SLPC is unavailable for pre-Gen12 */
+ return guc->submission_supported &&
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+}
+
+static bool __guc_slpc_selected(struct intel_guc *guc)
+{
+ if (!intel_guc_slpc_is_supported(guc))
+ return false;
+
+ return guc->submission_selected;
+}
+
+void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+
+ slpc->supported = __detect_slpc_supported(guc);
+ slpc->selected = __guc_slpc_selected(guc);
+}
+
+static void slpc_mem_set_param(struct slpc_shared_data *data,
+ u32 id, u32 value)
+{
+ GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
+ /*
+	 * When the flag bit is set, the corresponding value will be read
+	 * and applied by SLPC.
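+	 * For example, id 40 maps to bit 8 of bits[1] (40 >> 5 == 1,
+	 * 40 % 32 == 8), with the value itself stored in values[40].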
+ */
+ data->override_params.bits[id >> 5] |= (1 << (id % 32));
+ data->override_params.values[id] = value;
+}
+
+static void slpc_mem_set_enabled(struct slpc_shared_data *data,
+ u8 enable_id, u8 disable_id)
+{
+ /*
+ * Enabling a param involves setting the enable_id
+ * to 1 and disable_id to 0.
+ */
+ slpc_mem_set_param(data, enable_id, 1);
+ slpc_mem_set_param(data, disable_id, 0);
+}
+
+static void slpc_mem_set_disabled(struct slpc_shared_data *data,
+ u8 enable_id, u8 disable_id)
+{
+ /*
+ * Disabling a param involves setting the enable_id
+ * to 0 and disable_id to 1.
+ */
+ slpc_mem_set_param(data, disable_id, 1);
+ slpc_mem_set_param(data, enable_id, 0);
+}
+
+static u32 slpc_get_state(struct intel_guc_slpc *slpc)
+{
+ struct slpc_shared_data *data;
+
+ GEM_BUG_ON(!slpc->vma);
+
+ drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
+ data = slpc->vaddr;
+
+ return data->header.global_state;
+}
+
+static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+ id,
+ value,
+ };
+ int ret;
+
+ ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ return guc_action_slpc_set_param_nb(guc, id, value);
+}
+
+static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+ id,
+ value,
+ };
+ int ret;
+
+ ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static bool slpc_is_running(struct intel_guc_slpc *slpc)
+{
+ return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
+}
+
+static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
+ offset,
+ 0,
+ };
+ int ret;
+
+ ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_query_task_state(struct intel_guc_slpc *slpc)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+ int ret;
+
+ ret = guc_action_slpc_query(guc, offset);
+ if (unlikely(ret))
+ i915_probe_error(i915, "Failed to query task state (%pe)\n",
+ ERR_PTR(ret));
+
+ drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
+
+ return ret;
+}
+
+static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ int ret;
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ ret = guc_action_slpc_set_param(guc, id, value);
+ if (ret)
+ i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
+ id, value, ERR_PTR(ret));
+
+ return ret;
+}
+
+static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ lockdep_assert_held(&slpc->lock);
+
+ if (!intel_guc_is_ready(guc))
+ return -ENODEV;
+
+ /*
+	 * This function differs from intel_guc_slpc_set_min_freq():
+	 * the softlimit is not updated here since this is used to
+	 * temporarily change the min freq, for example during a
+	 * waitboost. The caller is responsible for checking bounds.
+ */
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ /* Non-blocking request will avoid stalls */
+ ret = slpc_set_param_nb(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
+ if (ret)
+ drm_notice(&i915->drm,
+ "Failed to send set_param for min freq(%d): (%d)\n",
+ freq, ret);
+ }
+
+ return ret;
+}
+
+static void slpc_boost_work(struct work_struct *work)
+{
+ struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
+ int err;
+
+ /*
+ * Raise min freq to boost. It's possible that
+ * this is greater than current max. But it will
+ * certainly be limited by RP0. An error setting
+ * the min param is not fatal.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_read(&slpc->num_waiters)) {
+ err = slpc_force_min_freq(slpc, slpc->boost_freq);
+ if (!err)
+ slpc->num_boosts++;
+ }
+ mutex_unlock(&slpc->lock);
+}
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int err;
+
+ GEM_BUG_ON(slpc->vma);
+
+ err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
+ if (unlikely(err)) {
+ i915_probe_error(i915,
+ "Failed to allocate SLPC struct (err=%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ slpc->max_freq_softlimit = 0;
+ slpc->min_freq_softlimit = 0;
+
+ slpc->boost_freq = 0;
+ atomic_set(&slpc->num_waiters, 0);
+ slpc->num_boosts = 0;
+ slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+
+ mutex_init(&slpc->lock);
+ INIT_WORK(&slpc->boost_work, slpc_boost_work);
+
+ return err;
+}
+
+static const char *slpc_global_state_to_string(enum slpc_global_state state)
+{
+ switch (state) {
+ case SLPC_GLOBAL_STATE_NOT_RUNNING:
+ return "not running";
+ case SLPC_GLOBAL_STATE_INITIALIZING:
+ return "initializing";
+ case SLPC_GLOBAL_STATE_RESETTING:
+ return "resetting";
+ case SLPC_GLOBAL_STATE_RUNNING:
+ return "running";
+ case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+ return "shutting down";
+ case SLPC_GLOBAL_STATE_ERROR:
+ return "error";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
+{
+ return slpc_global_state_to_string(slpc_get_state(slpc));
+}
+
+static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_RESET, 2),
+ offset,
+ 0,
+ };
+ int ret;
+
+ ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_reset(struct intel_guc_slpc *slpc)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
+ u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+ int ret;
+
+ ret = guc_action_slpc_reset(guc, offset);
+
+ if (unlikely(ret < 0)) {
+ i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!ret) {
+ if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
+ i915_probe_error(i915, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
+{
+ struct slpc_shared_data *data = slpc->vaddr;
+
+ GEM_BUG_ON(!slpc->vma);
+
+ return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
+ data->task_state_data.freq) *
+ GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
+}
+
+static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
+{
+ struct slpc_shared_data *data = slpc->vaddr;
+
+ GEM_BUG_ON(!slpc->vma);
+
+ return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
+ data->task_state_data.freq) *
+ GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
+}
+
+static void slpc_shared_data_reset(struct slpc_shared_data *data)
+{
+ memset(data, 0, sizeof(struct slpc_shared_data));
+
+ data->header.size = sizeof(struct slpc_shared_data);
+
+ /* Enable only GTPERF task, disable others */
+ slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
+ SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+}
+
+/**
+ * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to update the max frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (val < slpc->min_freq ||
+ val > slpc->rp0_freq ||
+ val < slpc->min_freq_softlimit)
+ return -EINVAL;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+ val);
+
+ /* Return standardized err code for sysfs calls */
+ if (ret)
+ ret = -EIO;
+ }
+
+ if (!ret)
+ slpc->max_freq_softlimit = val;
+
+ return ret;
+}
+
+/**
+ * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: pointer to val which will hold max frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to read the max frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ /* Force GuC to update task data */
+ ret = slpc_query_task_state(slpc);
+
+ if (!ret)
+ *val = slpc_decode_max_freq(slpc);
+ }
+
+ return ret;
+}
+
+/**
+ * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to update the min unslice
+ * frequency.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (val < slpc->min_freq ||
+ val > slpc->rp0_freq ||
+ val > slpc->max_freq_softlimit)
+ return -EINVAL;
+
+ /* Need a lock now since waitboost can be modifying min as well */
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /* Ignore efficient freq if lower min freq is requested */
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val < slpc->rp1_freq);
+ if (ret) {
+ i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
+ ERR_PTR(ret));
+ goto out;
+ }
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ val);
+
+ if (!ret)
+ slpc->min_freq_softlimit = val;
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+
+ /* Return standardized err code for sysfs calls */
+ if (ret)
+ ret = -EIO;
+
+ return ret;
+}
+
+/**
+ * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: pointer to val which will hold min frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to read the min frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ /* Force GuC to update task data */
+ ret = slpc_query_task_state(slpc);
+
+ if (!ret)
+ *val = slpc_decode_min_freq(slpc);
+ }
+
+ return ret;
+}
+
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (!HAS_MEDIA_RATIO_MODE(i915))
+ return -ENODEV;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_MEDIA_FF_RATIO_MODE,
+ val);
+ return ret;
+}
+
+void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
+{
+ u32 pm_intrmsk_mbz = 0;
+
+ /*
+ * Allow GuC to receive ARAT timer expiry event.
+	 * This interrupt register is set up by the RPS code
+	 * when host-based Turbo is enabled.
+ */
+ pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+
+ intel_uncore_rmw(gt->uncore,
+ GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
+}
+
+static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
+{
+ int ret = 0;
+
+ /*
+ * Softlimits are initially equivalent to platform limits
+ * unless they have deviated from defaults, in which case,
+ * we retain the values and set min/max accordingly.
+ */
+ if (!slpc->max_freq_softlimit) {
+ slpc->max_freq_softlimit = slpc->rp0_freq;
+ slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
+ } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
+ ret = intel_guc_slpc_set_max_freq(slpc,
+ slpc->max_freq_softlimit);
+ }
+
+ if (unlikely(ret))
+ return ret;
+
+ if (!slpc->min_freq_softlimit) {
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
+ if (unlikely(ret))
+ return ret;
+ slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+ } else {
+ return intel_guc_slpc_set_min_freq(slpc,
+ slpc->min_freq_softlimit);
+ }
+
+ return 0;
+}
+
+static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
+{
+	/* Force SLPC to use the platform rp0 */
+ return slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+ slpc->rp0_freq);
+}
+
+static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
+{
+ struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+ slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
+ slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
+ slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
+
+ if (!slpc->boost_freq)
+ slpc->boost_freq = slpc->rp0_freq;
+}
+
+/**
+ * intel_guc_slpc_enable() - Start SLPC
+ * @slpc: pointer to intel_guc_slpc.
+ *
+ * SLPC is enabled by setting up the shared data structure and
+ * sending a reset event to GuC SLPC. Initial data is set up in
+ * intel_guc_slpc_init(). Here we send the reset event. We do
+ * not currently need a slpc_disable since this is taken care
+ * of automatically when a reset/suspend occurs and the GuC
+ * CTB is destroyed.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ int ret;
+
+ GEM_BUG_ON(!slpc->vma);
+
+ slpc_shared_data_reset(slpc->vaddr);
+
+ ret = slpc_reset(slpc);
+ if (unlikely(ret < 0)) {
+ i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = slpc_query_task_state(slpc);
+ if (unlikely(ret < 0))
+ return ret;
+
+ intel_guc_pm_intrmsk_enable(to_gt(i915));
+
+ slpc_get_rp_values(slpc);
+
+ /* Set SLPC max limit to RP0 */
+ ret = slpc_use_fused_rp0(slpc);
+ if (unlikely(ret)) {
+ i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ /* Revert SLPC min/max to softlimits if necessary */
+ ret = slpc_set_softlimits(slpc);
+ if (unlikely(ret)) {
+ i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ /* Set cached media freq ratio mode */
+ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+
+ return 0;
+}
+
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+ int ret = 0;
+
+ if (val < slpc->min_freq || val > slpc->rp0_freq)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+
+ if (slpc->boost_freq != val) {
+ /* Apply only if there are active waiters */
+ if (atomic_read(&slpc->num_waiters)) {
+ ret = slpc_force_min_freq(slpc, val);
+ if (ret) {
+ ret = -EIO;
+ goto done;
+ }
+ }
+
+ slpc->boost_freq = val;
+ }
+
+done:
+ mutex_unlock(&slpc->lock);
+ return ret;
+}
+
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
+{
+ /*
+	 * Return min back to the softlimit.
+	 * This is called during request retire, so there is
+	 * no need to fail the retire if the set_param fails.
+ */
+ mutex_lock(&slpc->lock);
+ if (atomic_dec_and_test(&slpc->num_waiters))
+ slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
+ mutex_unlock(&slpc->lock);
+}
+
+int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct slpc_shared_data *data = slpc->vaddr;
+ struct slpc_task_state_data *slpc_tasks;
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ GEM_BUG_ON(!slpc->vma);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_query_task_state(slpc);
+
+ if (!ret) {
+ slpc_tasks = &data->task_state_data;
+
+ drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
+ drm_printf(p, "\tGTPERF task active: %s\n",
+ str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tMax freq: %u MHz\n",
+ slpc_decode_max_freq(slpc));
+ drm_printf(p, "\tMin freq: %u MHz\n",
+ slpc_decode_min_freq(slpc));
+ drm_printf(p, "\twaitboosts: %u\n",
+ slpc->num_boosts);
+ }
+ }
+
+ return ret;
+}
+
+void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
+{
+ if (!slpc->vma)
+ return;
+
+ i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
new file mode 100644
index 000000000..82a98f78f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SLPC_H_
+#define _INTEL_GUC_SLPC_H_
+
+#include "intel_guc_submission.h"
+#include "intel_guc_slpc_types.h"
+
+struct intel_gt;
+struct drm_printer;
+
+static inline bool intel_guc_slpc_is_supported(struct intel_guc *guc)
+{
+ return guc->slpc.supported;
+}
+
+static inline bool intel_guc_slpc_is_wanted(struct intel_guc *guc)
+{
+ return guc->slpc.selected;
+}
+
+static inline bool intel_guc_slpc_is_used(struct intel_guc *guc)
+{
+ return intel_guc_submission_is_used(guc) && intel_guc_slpc_is_wanted(guc);
+}
+
+void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc);
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_enable(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_fini(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
+int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
+int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
+int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val);
+void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
+void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
new file mode 100644
index 000000000..73d208123
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SLPC_TYPES_H_
+#define _INTEL_GUC_SLPC_TYPES_H_
+
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define SLPC_RESET_TIMEOUT_MS 5
+
+struct intel_guc_slpc {
+ struct i915_vma *vma;
+ struct slpc_shared_data *vaddr;
+ bool supported;
+ bool selected;
+
+ /* platform frequency limits */
+ u32 min_freq;
+ u32 rp0_freq;
+ u32 rp1_freq;
+ u32 boost_freq;
+
+ /* frequency softlimits */
+ u32 min_freq_softlimit;
+ u32 max_freq_softlimit;
+
+ /* cached media ratio mode */
+ u32 media_ratio_mode;
+
+ /* Protects set/reset of boost freq
+ * and value of num_waiters
+ */
+ struct mutex lock;
+
+ struct work_struct boost_work;
+ atomic_t num_waiters;
+ u32 num_boosts;
+};
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
new file mode 100644
index 000000000..fecdc7ea7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -0,0 +1,5192 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+
+#include "gem/i915_gem_context.h"
+#include "gt/gen8_engine_cs.h"
+#include "gt/intel_breadcrumbs.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_mocs.h"
+#include "gt/intel_ring.h"
+
+#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_submission.h"
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+/**
+ * DOC: GuC-based command submission
+ *
+ * The Scratch registers:
+ * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
+ * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
+ * triggers an interrupt on the GuC via another register write (0xC4C8).
+ * Firmware writes a success/fail code back to the action register after
+ * processing the request. The kernel driver polls waiting for this update and
+ * then proceeds.
+ *
+ * Command Transport buffers (CTBs):
+ * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
+ * - G2H) are a message interface between the i915 and GuC.
+ *
+ * Context registration:
+ * Before a context can be submitted it must be registered with the GuC via a
+ * H2G. A unique guc_id is associated with each context. The context is either
+ * registered at request creation time (normal operation) or at submission time
+ * (abnormal operation, e.g. after a reset).
+ *
+ * Context submission:
+ * The i915 updates the LRC tail value in memory. The i915 must enable the
+ * scheduling of the context within the GuC for the GuC to actually consider it.
+ * Therefore, the first time a disabled context is submitted we use a schedule
+ * enable H2G, while follow up submissions are done via the context submit H2G,
+ * which informs the GuC that a previously enabled context has new work
+ * available.
+ *
+ * Context unpin:
+ * To unpin a context a H2G is used to disable scheduling. When the
+ * corresponding G2H returns indicating the scheduling disable operation has
+ * completed it is safe to unpin the context. While a disable is in flight it
+ * isn't safe to resubmit the context so a fence is used to stall all future
+ * requests of that context until the G2H is returned.
+ *
+ * Context deregistration:
+ * Before a context can be destroyed or if we steal its guc_id we must
+ * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
+ * safe to submit anything to this guc_id until the deregister completes so a
+ * fence is used to stall all requests associated with this guc_id until the
+ * corresponding G2H returns indicating the guc_id has been deregistered.
+ *
+ * submission_state.guc_ids:
+ * Unique number associated with private GuC context data passed in during
+ * context registration / submission / deregistration. 64k available. Simple ida
+ * is used for allocation.
+ *
+ * Stealing guc_ids:
+ * If no guc_ids are available they can be stolen from another context at
+ * request creation time if that context is unpinned. If a guc_id can't be found
+ * we punt this problem to the user as we believe this is near impossible to hit
+ * during normal use cases.
+ *
+ * Locking:
+ * In the GuC submission code we have 3 basic spin locks which protect
+ * everything. Details about each below.
+ *
+ * sched_engine->lock
+ * This is the submission lock for all contexts that share an i915 schedule
+ * engine (sched_engine), thus only one of the contexts which share a
+ * sched_engine can be submitting at a time. Currently only one sched_engine is
+ * used for all of GuC submission but that could change in the future.
+ *
+ * guc->submission_state.lock
+ * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
+ * list.
+ *
+ * ce->guc_state.lock
+ * Protects everything under ce->guc_state. Ensures that a context is in the
+ * correct state before issuing a H2G. e.g. We don't issue a schedule disable
+ * on a disabled context (bad idea), we don't issue a schedule enable when a
+ * schedule disable is in flight, etc... Also protects list of inflight requests
+ * on the context and the priority management state. Lock is individual to each
+ * context.
+ *
+ * Lock ordering rules:
+ * sched_engine->lock -> ce->guc_state.lock
+ * guc->submission_state.lock -> ce->guc_state.lock
+ *
+ * Reset races:
+ * When a full GT reset is triggered it is assumed that some G2H responses to
+ * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
+ * fatal as we do certain operations upon receiving a G2H (e.g. destroy
+ * contexts, release guc_ids, etc...). When this occurs we can scrub the
+ * context state and cleanup appropriately, however this is quite racey.
+ * To avoid races, the reset code must disable submission before scrubbing for
+ * the missing G2H, while the submission code must check for submission being
+ * disabled and skip sending H2Gs and updating context states when it is. Both
+ * sides must also make sure to hold the relevant locks.
+ */
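+
+/*
+ * A minimal sketch of the lock nesting described above; the real submission
+ * path (guc_submission_tasklet() -> __guc_add_request()) follows this pattern:
+ *
+ *	spin_lock_irqsave(&sched_engine->lock, flags);
+ *	spin_lock(&ce->guc_state.lock);
+ *	... update ce->guc_state.sched_state, build and send the H2G ...
+ *	spin_unlock(&ce->guc_state.lock);
+ *	spin_unlock_irqrestore(&sched_engine->lock, flags);
+ */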
+
+/* GuC Virtual Engine */
+struct guc_virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+};
+
+static struct intel_context *
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
+
+#define GUC_REQUEST_SIZE 64 /* bytes */
+
+/*
+ * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
+ * per the GuC submission interface. A different allocation algorithm is used
+ * (bitmap vs. ida) between multi-lrc and single-lrc, hence the need to
+ * partition the guc_id space. We believe the number of multi-lrc contexts in
+ * use should be low and 1/16 should be sufficient. A minimum of 32 guc_ids is
+ * reserved for multi-lrc.
+ */
+#define NUMBER_MULTI_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids / 16)
+
+/*
+ * Below is a set of functions which control the GuC scheduling state which
+ * require a lock.
+ */
+#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
+#define SCHED_STATE_DESTROYED BIT(1)
+#define SCHED_STATE_PENDING_DISABLE BIT(2)
+#define SCHED_STATE_BANNED BIT(3)
+#define SCHED_STATE_ENABLED BIT(4)
+#define SCHED_STATE_PENDING_ENABLE BIT(5)
+#define SCHED_STATE_REGISTERED BIT(6)
+#define SCHED_STATE_POLICY_REQUIRED BIT(7)
+#define SCHED_STATE_BLOCKED_SHIFT 8
+#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
+#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
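+/*
+ * SCHED_STATE_BLOCKED is a counter rather than a single flag: bits 8-19 hold
+ * the nesting depth of blocks on the context, adjusted by
+ * incr_context_blocked()/decr_context_blocked() below.
+ */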
+
+static inline void init_sched_state(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
+}
+
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+ /* Kernel contexts can have SCHED_STATE_REGISTERED after suspend. */
+ return !(ce->guc_state.sched_state &
+ ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
+}
+
+static inline bool
+context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state &
+ SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
+
+static inline void
+set_context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |=
+ SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
+
+static inline void
+clr_context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &=
+ ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
+
+static inline bool
+context_destroyed(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
+}
+
+static inline void
+set_context_destroyed(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
+}
+
+static inline bool context_pending_disable(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
+}
+
+static inline void set_context_pending_disable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
+}
+
+static inline void clr_context_pending_disable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
+}
+
+static inline bool context_banned(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_BANNED;
+}
+
+static inline void set_context_banned(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_BANNED;
+}
+
+static inline void clr_context_banned(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
+}
+
+static inline bool context_enabled(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void set_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline bool context_registered(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
+}
+
+static inline bool context_policy_required(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void set_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void clr_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline u32 context_blocked(struct intel_context *ce)
+{
+ return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
+ SCHED_STATE_BLOCKED_SHIFT;
+}
+
+static inline void incr_context_blocked(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
+
+ GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
+}
+
+static inline void decr_context_blocked(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
+
+ ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
+}
+
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+ return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ++ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ --ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static struct intel_context *
+request_to_scheduling_context(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
+static inline bool context_guc_id_invalid(struct intel_context *ce)
+{
+ return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
+}
+
+static inline void set_context_guc_id_invalid(struct intel_context *ce)
+{
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
+}
+
+static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
+{
+ return &ce->engine->gt->uc.guc;
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+/*
+ * When using multi-lrc submission a scratch memory area is reserved in the
+ * parent's context state for the process descriptor, work queue, and handshake
+ * between the parent + children contexts to insert safe preemption points
+ * between each of the BBs. Currently the scratch area is sized to a page.
+ *
+ * The layout of this scratch area is below:
+ * 0 guc_process_desc
+ * + sizeof(struct guc_process_desc) child go
+ * + CACHELINE_BYTES child join[0]
+ * ...
+ * + CACHELINE_BYTES child join[n - 1]
+ * ... unused
+ * PARENT_SCRATCH_SIZE / 2 work queue start
+ * ... work queue
+ * PARENT_SCRATCH_SIZE - 1 work queue end
+ */
+#define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
+#define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
+
+struct sync_semaphore {
+ u32 semaphore;
+ u8 unused[CACHELINE_BYTES - sizeof(u32)];
+};
+
+struct parent_scratch {
+ union guc_descs {
+ struct guc_sched_wq_desc wq_desc;
+ struct guc_process_desc_v69 pdesc;
+ } descs;
+
+ struct sync_semaphore go;
+ struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
+
+ u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
+ sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
+
+ u32 wq[WQ_SIZE / sizeof(u32)];
+};
+
+static u32 __get_parent_scratch_offset(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->parallel.guc.parent_page);
+
+ return ce->parallel.guc.parent_page * PAGE_SIZE;
+}
+
+static u32 __get_wq_offset(struct intel_context *ce)
+{
+ BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
+
+ return __get_parent_scratch_offset(ce) + WQ_OFFSET;
+}
+
+static struct parent_scratch *
+__get_parent_scratch(struct intel_context *ce)
+{
+ BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
+ BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
+
+ /*
+ * Need to subtract LRC_STATE_OFFSET here as the
+ * parallel.guc.parent_page is the offset into ce->state while
+	 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+ */
+ return (struct parent_scratch *)
+ (ce->lrc_reg_state +
+ ((__get_parent_scratch_offset(ce) -
+ LRC_STATE_OFFSET) / sizeof(u32)));
+}
+
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->descs.pdesc;
+}
+
+static struct guc_sched_wq_desc *
+__get_wq_desc_v70(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->descs.wq_desc;
+}
+
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
+{
+ /*
+	 * Check for space in the work queue. We cache a value of the head
+	 * pointer in the intel_context structure in order to reduce the number
+	 * of accesses to shared GPU memory, which may be across a PCIe bus.
+ */
+#define AVAILABLE_SPACE \
+ CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
+ if (wqi_size > AVAILABLE_SPACE) {
+ ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
+
+ if (wqi_size > AVAILABLE_SPACE)
+ return NULL;
+ }
+#undef AVAILABLE_SPACE
+
+ return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
+}
+
+static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
+{
+ struct intel_context *ce = xa_load(&guc->context_lookup, id);
+
+ GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
+
+ return ce;
+}
+
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+ struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+ if (!base)
+ return NULL;
+
+ GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+ return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+ u32 size;
+ int ret;
+
+ size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+ GUC_MAX_CONTEXT_ID);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+ (void **)&guc->lrc_desc_pool_vaddr_v69);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+ if (!guc->lrc_desc_pool_vaddr_v69)
+ return;
+
+ guc->lrc_desc_pool_vaddr_v69 = NULL;
+ i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
+static inline bool guc_submission_initialized(struct intel_guc *guc)
+{
+ return guc->submission_initialized;
+}
+
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+ struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+ if (desc)
+ memset(desc, 0, sizeof(*desc));
+}
+
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
+{
+ return __get_context(guc, id);
+}
+
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+ struct intel_context *ce)
+{
+ unsigned long flags;
+
+ /*
+	 * The xarray API doesn't have an xa_store_irqsave() wrapper, so call
+	 * the lower level functions directly.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+ unsigned long flags;
+
+ if (unlikely(!guc_submission_initialized(guc)))
+ return;
+
+ _reset_lrc_desc_v69(guc, id);
+
+ /*
+	 * The xarray API doesn't have an xa_erase_irqsave() wrapper, so call
+	 * the lower level functions directly.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ __xa_erase(&guc->context_lookup, id);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+ if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+ wake_up_all(&guc->ct.wq);
+}
+
+static int guc_submission_send_busy_loop(struct intel_guc *guc,
+ const u32 *action,
+ u32 len,
+ u32 g2h_len_dw,
+ bool loop)
+{
+ /*
+ * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
+ * so we don't handle the case where we don't get a reply because we
+ * aborted the send due to the channel being busy.
+ */
+ GEM_BUG_ON(g2h_len_dw && !loop);
+
+ if (g2h_len_dw)
+ atomic_inc(&guc->outstanding_submission_g2h);
+
+ return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+}
+
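+/*
+ * Wait for @wait_var to drop to zero while sleeping on guc->ct.wq, or until
+ * @timeout expires or a signal arrives (when @interruptible). Returns 0 on
+ * success, -ETIME on timeout or -EINTR if interrupted.
+ */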
+int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
+ atomic_t *wait_var,
+ bool interruptible,
+ long timeout)
+{
+ const int state = interruptible ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ DEFINE_WAIT(wait);
+
+ might_sleep();
+ GEM_BUG_ON(timeout < 0);
+
+ if (!atomic_read(wait_var))
+ return 0;
+
+ if (!timeout)
+ return -ETIME;
+
+ for (;;) {
+ prepare_to_wait(&guc->ct.wq, &wait, state);
+
+ if (!atomic_read(wait_var))
+ break;
+
+ if (signal_pending_state(state, current)) {
+ timeout = -EINTR;
+ break;
+ }
+
+ if (!timeout) {
+ timeout = -ETIME;
+ break;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ }
+ finish_wait(&guc->ct.wq, &wait);
+
+ return (timeout < 0) ? timeout : 0;
+}
+
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
+{
+ if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
+ return 0;
+
+ return intel_guc_wait_for_pending_msg(guc,
+ &guc->outstanding_submission_g2h,
+ true, timeout);
+}
+
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
+static int try_context_registration(struct intel_context *ce, bool loop);
+
+static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ int err = 0;
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ u32 action[3];
+ int len = 0;
+ u32 g2h_len_dw = 0;
+ bool enabled;
+
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+
+ /*
+	 * Corner case where requests were sitting in the priority list or a
+	 * request was resubmitted after the context was banned.
+ */
+ if (unlikely(!intel_context_is_schedulable(ce))) {
+ i915_request_put(i915_request_mark_eio(rq));
+ intel_engine_signal_breadcrumbs(ce->engine);
+ return 0;
+ }
+
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(ce));
+
+ if (context_policy_required(ce)) {
+ err = guc_context_policy_init_v70(ce, false);
+ if (err)
+ return err;
+ }
+
+ spin_lock(&ce->guc_state.lock);
+
+ /*
+ * The request / context will be run on the hardware when scheduling
+ * gets enabled in the unblock. For multi-lrc we still submit the
+ * context to move the LRC tails.
+ */
+ if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
+ goto out;
+
+ enabled = context_enabled(ce) || context_blocked(ce);
+
+ if (!enabled) {
+ action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
+ action[len++] = ce->guc_id.id;
+ action[len++] = GUC_CONTEXT_ENABLE;
+ set_context_pending_enable(ce);
+ intel_context_get(ce);
+ g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
+ } else {
+ action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
+ action[len++] = ce->guc_id.id;
+ }
+
+ err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
+ if (!enabled && !err) {
+ trace_intel_context_sched_enable(ce);
+ atomic_inc(&guc->outstanding_submission_g2h);
+ set_context_enabled(ce);
+
+ /*
+ * Without multi-lrc KMD does the submission step (moving the
+ * lrc tail) so enabling scheduling is sufficient to submit the
+ * context. This isn't the case in multi-lrc submission as the
+ * GuC needs to move the tails, hence the need for another H2G
+ * to submit a multi-lrc context after enabling scheduling.
+ */
+ if (intel_context_is_parent(ce)) {
+ action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
+ err = intel_guc_send_nb(guc, action, len - 1, 0);
+ }
+ } else if (!enabled) {
+ clr_context_pending_enable(ce);
+ intel_context_put(ce);
+ }
+ if (likely(!err))
+ trace_i915_request_guc_submit(rq);
+
+out:
+ spin_unlock(&ce->guc_state.lock);
+ return err;
+}
+
+static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ int ret = __guc_add_request(guc, rq);
+
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_ADD_REQUEST;
+ }
+
+ return ret;
+}
+
+static inline void guc_set_lrc_tail(struct i915_request *rq)
+{
+ rq->context->lrc_reg_state[CTX_RING_TAIL] =
+ intel_ring_set_tail(rq->ring, rq->tail);
+}
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->sched.attr.priority;
+}
+
+static bool is_multi_lrc_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static bool can_merge_rq(struct i915_request *rq,
+ struct i915_request *last)
+{
+ return request_to_scheduling_context(rq) ==
+ request_to_scheduling_context(last);
+}
+
+static u32 wq_space_until_wrap(struct intel_context *ce)
+{
+ return (WQ_SIZE - ce->parallel.guc.wqi_tail);
+}
+
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
+{
+ BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
+
+ /*
+ * Ensure WQI are visible before updating tail
+ */
+ intel_guc_write_barrier(ce_to_guc(ce));
+
+ ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
+ (WQ_SIZE - 1);
+ WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
+}
+
+static int guc_wq_noop_append(struct intel_context *ce)
+{
+ u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
+ u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
+
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ ce->parallel.guc.wqi_tail = 0;
+
+ return 0;
+}
+
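+/*
+ * A multi-lrc work queue item is (number_children + 4) dwords: a header
+ * (type and length), the parent's lrca, the parent's guc_id and ring tail,
+ * a fence_id, and one ring tail per child.
+ */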
+static int __guc_wq_item_append(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ struct intel_context *child;
+ unsigned int wqi_size = (ce->parallel.number_children + 4) *
+ sizeof(u32);
+ u32 *wqi;
+ u32 len_dw = (wqi_size / sizeof(u32)) - 1;
+ int ret;
+
+	/* Ensure context is in the correct state before updating the work queue */
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(ce));
+ GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
+ GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
+
+ /* Insert NOOP if this work queue item will wrap the tail pointer. */
+ if (wqi_size > wq_space_until_wrap(ce)) {
+ ret = guc_wq_noop_append(ce);
+ if (ret)
+ return ret;
+ }
+
+ wqi = get_wq_pointer(ce, wqi_size);
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ *wqi++ = ce->lrc.lrca;
+ *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
+ FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
+ *wqi++ = 0; /* fence_id */
+ for_each_child(ce, child)
+ *wqi++ = child->ring->tail / sizeof(u64);
+
+ write_wqi(ce, wqi_size);
+
+ return 0;
+}
+
+static int guc_wq_item_append(struct intel_guc *guc,
+ struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ int ret;
+
+ if (unlikely(!intel_context_is_schedulable(ce)))
+ return 0;
+
+ ret = __guc_wq_item_append(rq);
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
+ }
+
+ return ret;
+}
+
+static bool multi_lrc_submit(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ intel_ring_set_tail(rq->ring, rq->tail);
+
+ /*
+ * We expect the front end (execbuf IOCTL) to set this flag on the last
+ * request generated from a multi-BB submission. This indicates to the
+ * backend (GuC interface) that we should submit this context thus
+ * submitting all the requests generated in parallel.
+ */
+ return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
+ !intel_context_is_schedulable(ce);
+}
+
+static int guc_dequeue_one_context(struct intel_guc *guc)
+{
+ struct i915_sched_engine * const sched_engine = guc->sched_engine;
+ struct i915_request *last = NULL;
+ bool submit = false;
+ struct rb_node *rb;
+ int ret;
+
+ lockdep_assert_held(&sched_engine->lock);
+
+ if (guc->stalled_request) {
+ submit = true;
+ last = guc->stalled_request;
+
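+		/*
+		 * Resume exactly where the previous attempt stalled with
+		 * -EBUSY, based on the recorded stall reason.
+		 */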
+ switch (guc->submission_stall_reason) {
+ case STALL_REGISTER_CONTEXT:
+ goto register_context;
+ case STALL_MOVE_LRC_TAIL:
+ goto move_lrc_tail;
+ case STALL_ADD_REQUEST:
+ goto add_request;
+ default:
+ MISSING_CASE(guc->submission_stall_reason);
+ }
+ }
+
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
+
+ priolist_for_each_request_consume(rq, rn, p) {
+ if (last && !can_merge_rq(rq, last))
+ goto register_context;
+
+ list_del_init(&rq->sched.link);
+
+ __i915_request_submit(rq);
+
+ trace_i915_request_in(rq, 0);
+ last = rq;
+
+ if (is_multi_lrc_rq(rq)) {
+ /*
+ * We need to coalesce all multi-lrc requests in
+ * a relationship into a single H2G. We are
+ * guaranteed that all of these requests will be
+ * submitted sequentially.
+ */
+ if (multi_lrc_submit(rq)) {
+ submit = true;
+ goto register_context;
+ }
+ } else {
+ submit = true;
+ }
+ }
+
+ rb_erase_cached(&p->node, &sched_engine->queue);
+ i915_priolist_free(p);
+ }
+
+register_context:
+ if (submit) {
+ struct intel_context *ce = request_to_scheduling_context(last);
+
+ if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
+ intel_context_is_schedulable(ce))) {
+ ret = try_context_registration(ce, false);
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ guc->stalled_request = last;
+ guc->submission_stall_reason =
+ STALL_REGISTER_CONTEXT;
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ }
+
+move_lrc_tail:
+ if (is_multi_lrc_rq(last)) {
+ ret = guc_wq_item_append(guc, last);
+ if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ } else {
+ guc_set_lrc_tail(last);
+ }
+
+add_request:
+ ret = guc_add_request(guc, last);
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ }
+
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+ return submit;
+
+deadlk:
+ sched_engine->tasklet.callback = NULL;
+ tasklet_disable_nosync(&sched_engine->tasklet);
+ return false;
+
+schedule_tasklet:
+ tasklet_schedule(&sched_engine->tasklet);
+ return false;
+}
+
+static void guc_submission_tasklet(struct tasklet_struct *t)
+{
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ unsigned long flags;
+ bool loop;
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+
+ do {
+ loop = guc_dequeue_one_context(sched_engine->private_data);
+ } while (loop);
+
+ i915_sched_engine_reset_on_empty(sched_engine);
+
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(engine);
+}
+
+static void __guc_context_destroy(struct intel_context *ce);
+static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
+static void guc_signal_context_fence(struct intel_context *ce);
+static void guc_cancel_context_requests(struct intel_context *ce);
+static void guc_blocked_fence_complete(struct intel_context *ce);
+
+static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
+{
+ struct intel_context *ce;
+ unsigned long index, flags;
+ bool pending_disable, pending_enable, deregister, destroyed, banned;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ /*
+		 * Corner case where the ref count on the object is zero but the
+		 * deregister G2H was lost. In this case we don't touch the ref
+ * count and finish the destroy of the context.
+ */
+ bool do_put = kref_get_unless_zero(&ce->ref);
+
+ xa_unlock(&guc->context_lookup);
+
+ spin_lock(&ce->guc_state.lock);
+
+ /*
+ * Once we are at this point submission_disabled() is guaranteed
+ * to be visible to all callers who set the below flags (see above
+ * flush and flushes in reset_prepare). If submission_disabled()
+ * is set, the caller shouldn't set these flags.
+ */
+
+ destroyed = context_destroyed(ce);
+ pending_enable = context_pending_enable(ce);
+ pending_disable = context_pending_disable(ce);
+ deregister = context_wait_for_deregister_to_register(ce);
+ banned = context_banned(ce);
+ init_sched_state(ce);
+
+ spin_unlock(&ce->guc_state.lock);
+
+ if (pending_enable || destroyed || deregister) {
+ decr_outstanding_submission_g2h(guc);
+ if (deregister)
+ guc_signal_context_fence(ce);
+ if (destroyed) {
+ intel_gt_pm_put_async(guc_to_gt(guc));
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ }
+ if (pending_enable || deregister)
+ intel_context_put(ce);
+ }
+
+		/* Not mutually exclusive with the above if statement. */
+ if (pending_disable) {
+ guc_signal_context_fence(ce);
+ if (banned) {
+ guc_cancel_context_requests(ce);
+ intel_engine_signal_breadcrumbs(ce->engine);
+ }
+ intel_context_sched_disable_unpin(ce);
+ decr_outstanding_submission_g2h(guc);
+
+ spin_lock(&ce->guc_state.lock);
+ guc_blocked_fence_complete(ce);
+ spin_unlock(&ce->guc_state.lock);
+
+ intel_context_put(ce);
+ }
+
+ if (do_put)
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For an inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
+ * increasing ns values, there is a need for this implementation to account for
+ * overflows and extend the GuC provided values to 64 bits before returning
+ * busyness to the user. In order to do that, a worker runs periodically with
+ * a period of 1/8th the time it takes for the timestamp to wrap (i.e. roughly
+ * once every 28 seconds for a gt clock frequency of 19.2 MHz).
+ */
+
+#define WRAP_TIME_CLKS U32_MAX
+#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
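+
+/*
+ * Worked example of the numbers above (illustrative only, assuming the
+ * 19.2 MHz gt clock quoted in the comment):
+ *
+ *   wrap time = U32_MAX / 19,200,000 Hz ~= 224 s
+ *   poll time = wrap time / 8           ~= 28 s
+ *
+ * intel_guc_submission_init() below derives the same period in jiffies as
+ * (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ.
+ */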
+
+static void
+__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
+{
+ u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
+
+ if (new_start == lower_32_bits(*prev_start))
+ return;
+
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched in contexts will have a start time that is
+ * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted, however if one of them has
+ * rolled over, we need to adjust gt_stamp_hi accordingly.
+ *
+ * The below conditions address the cases of new_start rollover and
+ * gt_stamp_last rollover respectively.
+ */
+ if (new_start < gt_stamp_last &&
+ (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
+ gt_stamp_hi++;
+
+ if (new_start > gt_stamp_last &&
+ (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
+ gt_stamp_hi--;
+
+ *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
+}
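+
+/*
+ * Worked example of the rollover handling above (illustrative values only):
+ *
+ *   prev gt_stamp = 0x3_FFFFFF00 (gt_stamp_hi = 3, gt_stamp_last = 0xFFFFFF00)
+ *   new_start     = 0x00000100 (context switched in just after a wrap)
+ *
+ * new_start < gt_stamp_last and the u32 difference new_start - gt_stamp_last
+ * is 0x200 <= POLL_TIME_CLKS, so gt_stamp_hi is bumped to 4 and *prev_start
+ * becomes 0x4_00000100. The mirror case (gt_stamp wrapped, new_start did not)
+ * decrements gt_stamp_hi instead.
+ */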
+
+#define record_read(map_, field_) \
+ iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
+
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. Sometimes the
+ * inconsistency is in reading the upper MSB bytes of the last_in value when
 * this race occurs. Two kinds of corruption are seen - the upper 8 bits are
 * zero or the upper 24 bits are zero. Since the corrupted values are still
 * non-zero, it is non-trivial to determine their validity. Instead we read
 * the values multiple times until they are consistent. In test runs, 3
 * attempts result in consistent values. The upper bound is set to 6 attempts
 * and may need to be tuned for any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
+{
+ struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+ int i = 0;
+
+ do {
+ *last_in = record_read(&rec_map, last_switch_in_stamp);
+ *id = record_read(&rec_map, current_context_index);
+ *total = record_read(&rec_map, total_runtime);
+
+ if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
+ record_read(&rec_map, current_context_index) == *id &&
+ record_read(&rec_map, total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
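+
+/*
+ * The loop above is a plain read-until-stable pattern: take a snapshot of all
+ * three fields, re-read them, and only accept the snapshot once the two reads
+ * agree (bounded at 6 attempts). A minimal sketch of the same idea, using a
+ * hypothetical shared u32 rather than the iosys_map fields above:
+ *
+ *	u32 a, b;
+ *	int i = 0;
+ *
+ *	do {
+ *		a = READ_ONCE(*shared);
+ *		b = READ_ONCE(*shared);
+ *	} while (a != b && ++i < 6);
+ */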
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 last_switch, ctx_id, total;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
+ stats->running = ctx_id != ~0U && last_switch;
+ if (stats->running)
+ __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
+
+ /*
+ * Instead of adjusting the total for overflow, just add the
+ * difference from previous sample stats->total_gt_clks
+ */
+ if (total && total != ~0U) {
+ stats->total_gt_clks += (u32)(total - stats->prev_total);
+ stats->prev_total = total;
+ }
+}
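+
+/*
+ * The unsigned subtraction above relies on modular arithmetic to stay correct
+ * across a rollover of 'total'. Worked example (illustrative values only):
+ *
+ *   prev_total = 0xFFFFFFF0, total = 0x00000010
+ *   (u32)(total - prev_total) = 0x20
+ *
+ * i.e. 0x20 gt clocks are accumulated even though 'total' wrapped between the
+ * two samples.
+ */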
+
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
+
+ lockdep_assert_held(&guc->timestamp.lock);
+
+ gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
+ gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
+ MISC_STATUS1) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
+ *now = ktime_get();
+
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
+ gt_stamp_hi++;
+
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
+}
+
+/*
 * Unlike the execlist mode of submission, total and active times are in terms
 * of gt clocks. The *now parameter is retained to return the cpu time at which
 * the busyness was sampled.
+ */
+static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
+{
+ struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
+ struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
+ struct intel_guc *guc = &gt->uc.guc;
+ u64 total, gt_stamp_saved;
+ unsigned long flags;
+ u32 reset_count;
+ bool in_reset;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ /*
+ * If a reset happened, we risk reading partially updated engine
+ * busyness from GuC, so we just use the driver stored copy of busyness.
+ * Synchronize with gt reset using reset_count and the
+ * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
+ * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
+ * usable by checking the flag afterwards.
+ */
+ reset_count = i915_reset_count(gpu_error);
+ in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+
+ *now = ktime_get();
+
+ /*
+ * The active busyness depends on start_gt_clk and gt_stamp.
+ * gt_stamp is updated by i915 only when gt is awake and the
+ * start_gt_clk is derived from GuC state. To get a consistent
+ * view of activity, we query the GuC state only if gt is awake.
+ */
+ if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ stats_saved = *stats;
+ gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
+ guc_update_engine_gt_clks(engine);
+ guc_update_pm_timestamp(guc, now);
+ intel_gt_pm_put_async(gt);
+ if (i915_reset_count(gpu_error) != reset_count) {
+ *stats = stats_saved;
+ guc->timestamp.gt_stamp = gt_stamp_saved;
+ }
+ }
+
+ total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ total += intel_gt_clock_interval_to_ns(gt, clk);
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+
+ return ns_to_ktime(total);
+}
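+
+/*
+ * The value returned here is a monotonically increasing busy time. A sampling
+ * consumer (the PMU path mentioned in the comment further up, or any other
+ * caller) can derive utilisation from two samples; the variable names below
+ * are illustrative only:
+ *
+ *	busy_a = guc_engine_busyness(engine, &t_a);
+ *	busy_b = guc_engine_busyness(engine, &t_b);
+ *	busy_pct = 100 * (busy_b - busy_a) / ktime_sub(t_b, t_a);
+ */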
+
+static void __reset_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ cancel_delayed_work_sync(&guc->timestamp.work);
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id) {
+ guc_update_engine_gt_clks(engine);
+ engine->stats.guc.prev_total = 0;
+ }
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void __update_guc_busyness_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+ ktime_t unused;
+
+ guc->timestamp.last_stat_jiffies = jiffies;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
+ guc_update_engine_gt_clks(engine);
+
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void guc_timestamp_ping(struct work_struct *wrk)
+{
+ struct intel_guc *guc = container_of(wrk, typeof(*guc),
+ timestamp.work.work);
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ int srcu, ret;
+
+ /*
+ * Synchronize with gt reset to make sure the worker does not
+ * corrupt the engine/guc stats.
+ */
+ ret = intel_gt_reset_trylock(gt, &srcu);
+ if (ret)
+ return;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ __update_guc_busyness_stats(guc);
+
+ intel_gt_reset_unlock(gt, srcu);
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
+static int guc_action_enable_usage_stats(struct intel_guc *guc)
+{
+ u32 offset = intel_guc_engine_usage_offset(guc);
+ u32 action[] = {
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
+ offset,
+ 0,
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void guc_init_engine_stats(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+ int ret = guc_action_enable_usage_stats(guc);
+
+ if (ret)
+ drm_err(&gt->i915->drm,
+ "Failed to enable usage stats: %d!\n", ret);
+ }
+}
+
+void intel_guc_busyness_park(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ /*
+	 * There is a race with the suspend flow where the worker runs after
+	 * suspend and causes an unclaimed register access warning. Cancel the
+	 * worker synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
+
+ /*
+ * Before parking, we should sample engine busyness stats if we need to.
+ * We can skip it if we are less than half a ping from the last time we
+ * sampled the busyness stats.
+ */
+ if (guc->timestamp.last_stat_jiffies &&
+ !time_after(jiffies, guc->timestamp.last_stat_jiffies +
+ (guc->timestamp.ping_delay / 2)))
+ return;
+
+ __update_guc_busyness_stats(guc);
+}
+
+void intel_guc_busyness_unpark(struct intel_gt *gt)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
+
+ if (!guc_submission_initialized(guc))
+ return;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
+ guc->timestamp.ping_delay);
+}
+
+static inline bool
+submission_disabled(struct intel_guc *guc)
+{
+ struct i915_sched_engine * const sched_engine = guc->sched_engine;
+
+ return unlikely(!sched_engine ||
+ !__tasklet_is_enabled(&sched_engine->tasklet) ||
+ intel_gt_is_wedged(guc_to_gt(guc)));
+}
+
+static void disable_submission(struct intel_guc *guc)
+{
+ struct i915_sched_engine * const sched_engine = guc->sched_engine;
+
+ if (__tasklet_is_enabled(&sched_engine->tasklet)) {
+ GEM_BUG_ON(!guc->ct.enabled);
+ __tasklet_disable_sync_once(&sched_engine->tasklet);
+ sched_engine->tasklet.callback = NULL;
+ }
+}
+
+static void enable_submission(struct intel_guc *guc)
+{
+ struct i915_sched_engine * const sched_engine = guc->sched_engine;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->sched_engine->lock, flags);
+ sched_engine->tasklet.callback = guc_submission_tasklet;
+ wmb(); /* Make sure callback visible */
+ if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
+ __tasklet_enable(&sched_engine->tasklet)) {
+ GEM_BUG_ON(!guc->ct.enabled);
+
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&sched_engine->tasklet);
+ }
+ spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
+}
+
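+/*
+ * Taking and immediately dropping the sched_engine lock acts as a barrier:
+ * once this returns, any thread that was inside guc_submit_request() or the
+ * submission tasklet under that lock has left its critical section, so
+ * callers (e.g. reset_prepare, context revocation) know there is no
+ * concurrent submission still running under the old state.
+ */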
+static void guc_flush_submissions(struct intel_guc *guc)
+{
+ struct i915_sched_engine * const sched_engine = guc->sched_engine;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static void guc_flush_destroyed_contexts(struct intel_guc *guc);
+
+void intel_guc_submission_reset_prepare(struct intel_guc *guc)
+{
+ if (unlikely(!guc_submission_initialized(guc))) {
+ /* Reset called during driver load? GuC not yet initialised! */
+ return;
+ }
+
+ intel_gt_park_heartbeats(guc_to_gt(guc));
+ disable_submission(guc);
+ guc->interrupts.disable(guc);
+ __reset_guc_busyness_stats(guc);
+
+ /* Flush IRQ handler */
+ spin_lock_irq(guc_to_gt(guc)->irq_lock);
+ spin_unlock_irq(guc_to_gt(guc)->irq_lock);
+
+ guc_flush_submissions(guc);
+ guc_flush_destroyed_contexts(guc);
+ flush_work(&guc->ct.requests.worker);
+
+ scrub_guc_desc_for_outstanding_g2h(guc);
+}
+
+static struct intel_engine_cs *
+guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
+{
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp, mask = ve->mask;
+ unsigned int num_siblings = 0;
+
+ for_each_engine_masked(engine, ve->gt, mask, tmp)
+ if (num_siblings++ == sibling)
+ return engine;
+
+ return NULL;
+}
+
+static inline struct intel_engine_cs *
+__context_to_physical_engine(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+
+ if (intel_engine_is_virtual(engine))
+ engine = guc_virtual_get_sibling(engine, 0);
+
+ return engine;
+}
+
+static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
+{
+ struct intel_engine_cs *engine = __context_to_physical_engine(ce);
+
+ if (!intel_context_is_schedulable(ce))
+ return;
+
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub)
+ lrc_init_regs(ce, engine, true);
+
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ lrc_update_regs(ce, engine, head);
+}
+
+static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
+ return;
+
+ intel_engine_stop_cs(engine);
+
+ /*
+ * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * to wait for any pending mi force wakeups
+ */
+ intel_engine_wait_for_pending_mi_fw(engine);
+}
+
+static void guc_reset_nop(struct intel_engine_cs *engine)
+{
+}
+
+static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
+{
+}
+
+static void
+__unwind_incomplete_requests(struct intel_context *ce)
+{
+ struct i915_request *rq, *rn;
+ struct list_head *pl;
+ int prio = I915_PRIORITY_INVALID;
+ struct i915_sched_engine * const sched_engine =
+ ce->engine->sched_engine;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry_safe_reverse(rq, rn,
+ &ce->guc_state.requests,
+ sched.link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ list_del_init(&rq->sched.link);
+ __i915_request_unsubmit(rq);
+
+ /* Push the request back into the queue for later resubmission. */
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(sched_engine, prio);
+ }
+ GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
+
+ list_add(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ }
+ spin_unlock(&ce->guc_state.lock);
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
+{
+ bool guilty;
+ struct i915_request *rq;
+ unsigned long flags;
+ u32 head;
+ int i, number_children = ce->parallel.number_children;
+ struct intel_context *parent = ce;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ intel_context_get(ce);
+
+ /*
+ * GuC will implicitly mark the context as non-schedulable when it sends
+ * the reset notification. Make sure our state reflects this change. The
+ * context will be marked enabled on resubmission.
+ */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ clr_context_enabled(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ /*
+ * For each context in the relationship find the hanging request
+ * resetting each context / request as needed
+ */
+ for (i = 0; i < number_children + 1; ++i) {
+ if (!intel_context_is_pinned(ce))
+ goto next_context;
+
+ guilty = false;
+ rq = intel_context_get_active_request(ce);
+ if (!rq) {
+ head = ce->ring->tail;
+ goto out_replay;
+ }
+
+ if (i915_request_started(rq))
+ guilty = stalled & ce->engine->mask;
+
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ head = intel_ring_wrap(ce->ring, rq->head);
+
+ __i915_request_reset(rq, guilty);
+ i915_request_put(rq);
+out_replay:
+ guc_reset_state(ce, head, guilty);
+next_context:
+ if (i != number_children)
+ ce = list_next_entry(ce, parallel.child_link);
+ }
+
+ __unwind_incomplete_requests(parent);
+ intel_context_put(parent);
+}
+
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+{
+ struct intel_context *ce;
+ unsigned long index;
+ unsigned long flags;
+
+ if (unlikely(!guc_submission_initialized(guc))) {
+ /* Reset called during driver load? GuC not yet initialised! */
+ return;
+ }
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
+ __guc_reset_context(ce, stalled);
+
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
+ /* GuC is blown away, drop all references to contexts */
+ xa_destroy(&guc->context_lookup);
+}
+
+static void guc_cancel_context_requests(struct intel_context *ce)
+{
+ struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
+ struct i915_request *rq;
+ unsigned long flags;
+
+ /* Mark all executing requests as skipped. */
+ spin_lock_irqsave(&sched_engine->lock, flags);
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+ spin_unlock(&ce->guc_state.lock);
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static void
+guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
+{
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ /* Can be called during boot if GuC fails to load */
+ if (!sched_engine)
+ return;
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&sched_engine->lock, flags);
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&sched_engine->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+
+ priolist_for_each_request_consume(rq, rn, p) {
+ list_del_init(&rq->sched.link);
+
+ __i915_request_submit(rq);
+
+ i915_request_put(i915_request_mark_eio(rq));
+ }
+
+ rb_erase_cached(&p->node, &sched_engine->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ sched_engine->queue_priority_hint = INT_MIN;
+ sched_engine->queue = RB_ROOT_CACHED;
+
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+void intel_guc_submission_cancel_requests(struct intel_guc *guc)
+{
+ struct intel_context *ce;
+ unsigned long index;
+ unsigned long flags;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
+ guc_cancel_context_requests(ce);
+
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
+ guc_cancel_sched_engine_requests(guc->sched_engine);
+
+ /* GuC is blown away, drop all references to contexts */
+ xa_destroy(&guc->context_lookup);
+}
+
+void intel_guc_submission_reset_finish(struct intel_guc *guc)
+{
+ /* Reset called during driver load or during wedge? */
+ if (unlikely(!guc_submission_initialized(guc) ||
+ intel_gt_is_wedged(guc_to_gt(guc)))) {
+ return;
+ }
+
+ /*
+	 * Technically possible for this value to be non-zero here, but very
+	 * unlikely + harmless. Regardless, let's add a warning so we can see
+	 * in CI if this happens frequently / as a precursor to taking down the
+	 * machine.
+ */
+ GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+ atomic_set(&guc->outstanding_submission_g2h, 0);
+
+ intel_guc_global_policies_update(guc);
+ enable_submission(guc);
+ intel_gt_unpark_heartbeats(guc_to_gt(guc));
+}
+
+static void destroyed_worker_func(struct work_struct *w);
+static void reset_fail_worker_func(struct work_struct *w);
+
+/*
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
+ */
+int intel_guc_submission_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ if (guc->submission_initialized)
+ return 0;
+
+ if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
+ ret = guc_lrc_desc_pool_create_v69(guc);
+ if (ret)
+ return ret;
+ }
+
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap) {
+ ret = -ENOMEM;
+ goto destroy_pool;
+ }
+
+ guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
+ guc->submission_initialized = true;
+
+ return 0;
+
+destroy_pool:
+ guc_lrc_desc_pool_destroy_v69(guc);
+
+ return ret;
+}
+
+void intel_guc_submission_fini(struct intel_guc *guc)
+{
+ if (!guc->submission_initialized)
+ return;
+
+ guc_flush_destroyed_contexts(guc);
+ guc_lrc_desc_pool_destroy_v69(guc);
+ i915_sched_engine_put(guc->sched_engine);
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
+ guc->submission_initialized = false;
+}
+
+static inline void queue_request(struct i915_sched_engine *sched_engine,
+ struct i915_request *rq,
+ int prio)
+{
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(sched_engine, prio));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ tasklet_hi_schedule(&sched_engine->tasklet);
+}
+
+static int guc_bypass_tasklet_submit(struct intel_guc *guc,
+ struct i915_request *rq)
+{
+ int ret = 0;
+
+ __i915_request_submit(rq);
+
+ trace_i915_request_in(rq, 0);
+
+ if (is_multi_lrc_rq(rq)) {
+ if (multi_lrc_submit(rq)) {
+ ret = guc_wq_item_append(guc, rq);
+ if (!ret)
+ ret = guc_add_request(guc, rq);
+ }
+ } else {
+ guc_set_lrc_tail(rq);
+ ret = guc_add_request(guc, rq);
+ }
+
+ if (unlikely(ret == -EPIPE))
+ disable_submission(guc);
+
+ return ret;
+}
+
+static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
+{
+ struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ return submission_disabled(guc) || guc->stalled_request ||
+ !i915_sched_engine_is_empty(sched_engine) ||
+ !ctx_id_mapped(guc, ce->guc_id.id);
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+ struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+ struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&sched_engine->lock, flags);
+
+ if (need_tasklet(guc, rq))
+ queue_request(sched_engine, rq, rq_prio(rq));
+ else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
+ tasklet_hi_schedule(&sched_engine->tasklet);
+
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ int ret;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ if (intel_context_is_parent(ce))
+ ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ret = ida_simple_get(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ce->guc_id.id = ret;
+ return 0;
+}
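+
+/*
+ * Illustrative example of the split above: a parent context with three
+ * children needs four consecutive guc_ids, so order_base_2(3 + 1) == 2 and a
+ * 2^2 == 4 id region is reserved in the multi-LRC bitmap. A single-LRC
+ * context instead takes one id from the ida, in the range starting at
+ * NUMBER_MULTI_LRC_GUC_ID(guc).
+ */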
+
+static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ if (!context_guc_id_invalid(ce)) {
+ if (intel_context_is_parent(ce))
+ bitmap_release_region(guc->submission_state.guc_ids_bitmap,
+ ce->guc_id.id,
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ida_simple_remove(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
+ clr_ctx_id_mapping(guc, ce->guc_id.id);
+ set_context_guc_id_invalid(ce);
+ }
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+}
+
+static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ __release_guc_id(guc, ce);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ struct intel_context *cn;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+ GEM_BUG_ON(intel_context_is_parent(ce));
+
+ if (!list_empty(&guc->submission_state.guc_id_list)) {
+ cn = list_first_entry(&guc->submission_state.guc_id_list,
+ struct intel_context,
+ guc_id.link);
+
+ GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(cn));
+ GEM_BUG_ON(intel_context_is_child(cn));
+ GEM_BUG_ON(intel_context_is_parent(cn));
+
+ list_del_init(&cn->guc_id.link);
+ ce->guc_id.id = cn->guc_id.id;
+
+ spin_lock(&cn->guc_state.lock);
+ clr_context_registered(cn);
+ spin_unlock(&cn->guc_state.lock);
+
+ set_context_guc_id_invalid(cn);
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ guc->number_guc_id_stolen++;
+#endif
+
+ return 0;
+ } else {
+ return -EAGAIN;
+ }
+}
+
+static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ int ret;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ ret = new_guc_id(guc, ce);
+ if (unlikely(ret < 0)) {
+ if (intel_context_is_parent(ce))
+ return -ENOSPC;
+
+ ret = steal_guc_id(guc, ce);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (intel_context_is_parent(ce)) {
+ struct intel_context *child;
+ int i = 1;
+
+ for_each_child(ce, child)
+ child->guc_id.id = ce->guc_id.id + i++;
+ }
+
+ return 0;
+}
+
+#define PIN_GUC_ID_TRIES 4
+static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ int ret = 0;
+ unsigned long flags, tries = PIN_GUC_ID_TRIES;
+
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
+
+try_again:
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+
+ might_lock(&ce->guc_state.lock);
+
+ if (context_guc_id_invalid(ce)) {
+ ret = assign_guc_id(guc, ce);
+ if (ret)
+ goto out_unlock;
+		ret = 1; /* Indicates newly assigned guc_id */
+ }
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ atomic_inc(&ce->guc_id.ref);
+
+out_unlock:
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ /*
+	 * -EAGAIN indicates no guc_ids are available, let's retire any
+	 * outstanding requests to see if that frees up a guc_id. If the first
+	 * retire didn't help, insert a sleep with the timeslice duration before
+	 * attempting to retire more requests. Double the sleep period each
+	 * subsequent pass before finally giving up. The sleep period has a
+	 * maximum of 100ms and a minimum of 1ms.
+ */
+ if (ret == -EAGAIN && --tries) {
+ if (PIN_GUC_ID_TRIES - tries > 1) {
+ unsigned int timeslice_shifted =
+ ce->engine->props.timeslice_duration_ms <<
+ (PIN_GUC_ID_TRIES - tries - 2);
+ unsigned int max = min_t(unsigned int, 100,
+ timeslice_shifted);
+
+ msleep(max_t(unsigned int, max, 1));
+ }
+ intel_gt_retire_requests(guc_to_gt(guc));
+ goto try_again;
+ }
+
+ return ret;
+}
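+
+/*
+ * Worked example of the backoff above, assuming a hypothetical 5 ms
+ * timeslice: after the first -EAGAIN the code only retires and retries,
+ * after the second it sleeps 5 ms before retiring, after the third it sleeps
+ * 10 ms, and a fourth failure is returned to the caller as -EAGAIN. The
+ * msleep() is always clamped to the [1, 100] ms range.
+ */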
+
+static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ if (unlikely(context_guc_id_invalid(ce) ||
+ intel_context_is_parent(ce)))
+ return;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
+ !atomic_read(&ce->guc_id.ref))
+ list_add_tail(&ce->guc_id.link,
+ &guc->submission_state.guc_id_list);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc_v69);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+ struct intel_context *ce,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
+ int len = 0;
+ u32 next_id;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = info->flags;
+ action[len++] = info->context_idx;
+ action[len++] = info->engine_class;
+ action[len++] = info->engine_submit_mask;
+ action[len++] = info->wq_desc_lo;
+ action[len++] = info->wq_desc_hi;
+ action[len++] = info->wq_base_lo;
+ action[len++] = info->wq_base_hi;
+ action[len++] = info->wq_size;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = info->hwlrca_lo;
+ action[len++] = info->hwlrca_hi;
+
+ next_id = info->context_idx + 1;
+ for_each_child(ce, child) {
+ GEM_BUG_ON(next_id++ != child->guc_id.id);
+
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ action[len++] = lower_32_bits(child->lrc.lrca);
+ action[len++] = upper_32_bits(child->lrc.lrca);
+ }
+
+ GEM_BUG_ON(len > ARRAY_SIZE(action));
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_CONTEXT,
+ guc_id,
+ offset,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_CONTEXT,
+ info->flags,
+ info->context_idx,
+ info->engine_class,
+ info->engine_submit_mask,
+ info->wq_desc_lo,
+ info->wq_desc_hi,
+ info->wq_base_lo,
+ info->wq_base_hi,
+ info->wq_size,
+ info->hwlrca_lo,
+ info->hwlrca_hi,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, loop);
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
+
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+ ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+ prepare_context_registration_info_v69(ce);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ return __guc_action_register_context_v69(guc, ce->guc_id.id,
+ offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+ struct guc_ctxt_registration_info info;
+
+ prepare_context_registration_info_v70(ce, &info);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+ else
+ return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ int ret;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+ trace_intel_context_register(ce);
+
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ ret = register_context_v70(guc, ce, loop);
+ else
+ ret = register_context_v69(guc, ce, loop);
+
+ if (likely(!ret)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ set_context_registered(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ guc_context_policy_init_v70(ce, loop);
+ }
+
+ return ret;
+}
+
+static int __guc_action_deregister_context(struct intel_guc *guc,
+ u32 guc_id)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
+ guc_id,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_DEREGISTER_CONTEXT,
+ true);
+}
+
+static int deregister_context(struct intel_context *ce, u32 guc_id)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+ trace_intel_context_deregister(ce);
+
+ return __guc_action_deregister_context(guc, guc_id);
+}
+
+static inline void clear_children_join_go_memory(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+ int i;
+
+ ps->go.semaphore = 0;
+ for (i = 0; i < ce->parallel.number_children + 1; ++i)
+ ps->join[i].semaphore = 0;
+}
+
+static inline u32 get_children_go_value(struct intel_context *ce)
+{
+ return __get_parent_scratch(ce)->go.semaphore;
+}
+
+static inline u32 get_children_join_value(struct intel_context *ce,
+ u8 child_index)
+{
+ return __get_parent_scratch(ce)->join[child_index].semaphore;
+}
+
+struct context_policy {
+ u32 count;
+ struct guc_update_context_policy h2g;
+};
+
+static u32 __guc_context_policy_action_size(struct context_policy *policy)
+{
+ size_t bytes = sizeof(policy->h2g.header) +
+ (sizeof(policy->h2g.klv[0]) * policy->count);
+
+ return bytes / sizeof(u32);
+}
+
+static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
+{
+ policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
+ policy->h2g.header.ctx_id = guc_id;
+ policy->count = 0;
+}
+
+#define MAKE_CONTEXT_POLICY_ADD(func, id) \
+static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
+{ \
+ GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
+ policy->h2g.klv[policy->count].kl = \
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
+ FIELD_PREP(GUC_KLV_0_LEN, 1); \
+ policy->h2g.klv[policy->count].value = data; \
+ policy->count++; \
+}
+
+MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
+MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
+MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+
+#undef MAKE_CONTEXT_POLICY_ADD
+
+static int __guc_context_set_context_policies(struct intel_guc *guc,
+ struct context_policy *policy,
+ bool loop)
+{
+ return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
+ __guc_context_policy_action_size(policy),
+ 0, loop);
+}
+
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct context_policy policy;
+ u32 execution_quantum;
+ u32 preemption_timeout;
+ unsigned long flags;
+ int ret;
+
+ /* NB: For both of these, zero means disabled. */
+ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
+ execution_quantum));
+ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
+ preemption_timeout));
+ execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ __guc_context_policy_add_preempt_to_idle(&policy, 1);
+
+ ret = __guc_context_set_context_policies(guc, &policy, loop);
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (ret != 0)
+ set_context_policy_required(ce);
+ else
+ clr_context_policy_required(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ return ret;
+}
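+
+/*
+ * Note the " * 1000" conversions above: engine->props values are in
+ * milliseconds while the GuC policy KLVs take microseconds, so e.g. a
+ * hypothetical 5 ms timeslice is programmed as an execution quantum of
+ * 5000 us.
+ */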
+
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ struct guc_lrc_desc_v69 *desc)
+{
+ desc->policy_flags = 0;
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+ /* NB: For both of these, zero means disabled. */
+ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
+ desc->execution_quantum));
+ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
+ desc->preemption_timeout));
+ desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+ /*
+ * this matches the mapping we do in map_i915_prio_to_guc_prio()
+ * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+ */
+ switch (prio) {
+ default:
+ MISSING_CASE(prio);
+ fallthrough;
+ case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+ return GEN12_CTX_PRIORITY_NORMAL;
+ case GUC_CLIENT_PRIORITY_NORMAL:
+ return GEN12_CTX_PRIORITY_LOW;
+ case GUC_CLIENT_PRIORITY_HIGH:
+ case GUC_CLIENT_PRIORITY_KMD_HIGH:
+ return GEN12_CTX_PRIORITY_HIGH;
+ }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 ctx_id = ce->guc_id.id;
+ struct guc_lrc_desc_v69 *desc;
+ struct intel_context *child;
+
+ GEM_BUG_ON(!engine->mask);
+
+ /*
+	 * Ensure the LRC and CT vmas are in the same region, as the write
+	 * barrier is done based on the CT vma region.
+ */
+ GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+ i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+ desc = __get_lrc_desc_v69(guc, ctx_id);
+ desc->engine_class = engine_class_to_guc_class(engine->class);
+ desc->engine_submit_mask = engine->logical_mask;
+ desc->hw_context_desc = ce->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc_v69 *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc_v69(ce);
+ memset(pdesc, 0, sizeof(*(pdesc)));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ ce->parallel.guc.wq_head = &pdesc->head;
+ ce->parallel.guc.wq_tail = &pdesc->tail;
+ ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 ctx_id = ce->guc_id.id;
+
+ GEM_BUG_ON(!engine->mask);
+
+ /*
+	 * Ensure the LRC and CT vmas are in the same region, as the write
+	 * barrier is done based on the CT vma region.
+ */
+ GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+ i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+ memset(info, 0, sizeof(*info));
+ info->context_idx = ctx_id;
+ info->engine_class = engine_class_to_guc_class(engine->class);
+ info->engine_submit_mask = engine->logical_mask;
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
+ info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+ info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
+ info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_sched_wq_desc *wq_desc;
+ u64 wq_desc_offset, wq_base_offset;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ wq_desc_offset = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ wq_base_offset = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ info->wq_desc_hi = upper_32_bits(wq_desc_offset);
+ info->wq_base_lo = lower_32_bits(wq_base_offset);
+ info->wq_base_hi = upper_32_bits(wq_base_offset);
+ info->wq_size = WQ_SIZE;
+
+ wq_desc = __get_wq_desc_v70(ce);
+ memset(wq_desc, 0, sizeof(*wq_desc));
+ wq_desc->wq_status = WQ_STATUS_ACTIVE;
+
+ ce->parallel.guc.wq_head = &wq_desc->head;
+ ce->parallel.guc.wq_tail = &wq_desc->tail;
+ ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
+ clear_children_join_go_memory(ce);
+ }
+}
+
+static int try_context_registration(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ intel_wakeref_t wakeref;
+ u32 ctx_id = ce->guc_id.id;
+ bool context_registered;
+ int ret = 0;
+
+ GEM_BUG_ON(!sched_state_is_init(ce));
+
+ context_registered = ctx_id_mapped(guc, ctx_id);
+
+ clr_ctx_id_mapping(guc, ctx_id);
+ set_ctx_id_mapping(guc, ctx_id, ce);
+
+ /*
+ * The context_lookup xarray is used to determine if the hardware
+	 * context is currently registered. There are two cases in which it
+	 * could be registered: either the guc_id has been stolen from another
+	 * context or the lrc descriptor address of this context has changed. In
+ * either case the context needs to be deregistered with the GuC before
+ * registering this context.
+ */
+ if (context_registered) {
+ bool disabled;
+ unsigned long flags;
+
+ trace_intel_context_steal_guc_id(ce);
+ GEM_BUG_ON(!loop);
+
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
+ set_context_wait_for_deregister_to_register(ce);
+ intel_context_get(ce);
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ clr_ctx_id_mapping(guc, ctx_id);
+ return 0; /* Will get registered later */
+ }
+
+ /*
+ * If stealing the guc_id, this ce has the same guc_id as the
+ * context whose guc_id was stolen.
+ */
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ ret = deregister_context(ce, ce->guc_id.id);
+ if (unlikely(ret == -ENODEV))
+ ret = 0; /* Will get registered later */
+ } else {
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ ret = register_context(ce, loop);
+ if (unlikely(ret == -EBUSY)) {
+ clr_ctx_id_mapping(guc, ctx_id);
+ } else if (unlikely(ret == -ENODEV)) {
+ clr_ctx_id_mapping(guc, ctx_id);
+ ret = 0; /* Will get registered later */
+ }
+ }
+
+ return ret;
+}
+
+static int __guc_context_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return lrc_pre_pin(ce, engine, ww, vaddr);
+}
+
+static int __guc_context_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
+{
+ if (i915_ggtt_offset(ce->state) !=
+ (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
+ set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+
+ /*
+	 * GuC context gets pinned in guc_request_alloc. See that function for
+	 * an explanation of why.
+ */
+
+ return lrc_pin(ce, engine, vaddr);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
+{
+ int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
+ if (likely(!ret && !intel_context_is_barrier(ce)))
+ intel_engine_pm_get(ce->engine);
+
+ return ret;
+}
+
+static void guc_context_unpin(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+
+ if (likely(!intel_context_is_barrier(ce)))
+ intel_engine_pm_put_async(ce->engine);
+}
+
+static void guc_context_post_unpin(struct intel_context *ce)
+{
+ lrc_post_unpin(ce);
+}
+
+static void __guc_context_sched_enable(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+ ce->guc_id.id,
+ GUC_CONTEXT_ENABLE
+ };
+
+ trace_intel_context_sched_enable(ce);
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+}
+
+static void __guc_context_sched_disable(struct intel_guc *guc,
+ struct intel_context *ce,
+ u16 guc_id)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+ guc_id, /* ce->guc_id.id not stable */
+ GUC_CONTEXT_DISABLE
+ };
+
+ GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+ trace_intel_context_sched_disable(ce);
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+}
+
+static void guc_blocked_fence_complete(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ if (!i915_sw_fence_done(&ce->guc_state.blocked))
+ i915_sw_fence_complete(&ce->guc_state.blocked);
+}
+
+static void guc_blocked_fence_reinit(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
+
+ /*
+ * This fence is always complete unless a pending schedule disable is
+ * outstanding. We arm the fence here and complete it when we receive
+ * the pending schedule disable complete message.
+ */
+ i915_sw_fence_fini(&ce->guc_state.blocked);
+ i915_sw_fence_reinit(&ce->guc_state.blocked);
+ i915_sw_fence_await(&ce->guc_state.blocked);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
+}
+
+static u16 prep_context_pending_disable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ set_context_pending_disable(ce);
+ clr_context_enabled(ce);
+ guc_blocked_fence_reinit(ce);
+ intel_context_get(ce);
+
+ return ce->guc_id.id;
+}
+
+static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+ struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+ intel_wakeref_t wakeref;
+ u16 guc_id;
+ bool enabled;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ incr_context_blocked(ce);
+
+ enabled = context_enabled(ce);
+ if (unlikely(!enabled || submission_disabled(guc))) {
+ if (enabled)
+ clr_context_enabled(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ return &ce->guc_state.blocked;
+ }
+
+ /*
+ * We add +2 here as the schedule disable complete CTB handler calls
+ * intel_context_sched_disable_unpin (-2 to pin_count).
+ */
+ atomic_add(2, &ce->pin_count);
+
+ guc_id = prep_context_pending_disable(ce);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_disable(guc, ce, guc_id);
+
+ return &ce->guc_state.blocked;
+}
+
+#define SCHED_STATE_MULTI_BLOCKED_MASK \
+ (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
+#define SCHED_STATE_NO_UNBLOCK \
+ (SCHED_STATE_MULTI_BLOCKED_MASK | \
+ SCHED_STATE_PENDING_DISABLE | \
+ SCHED_STATE_BANNED)
+
+static bool context_cant_unblock(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
+ context_guc_id_invalid(ce) ||
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
+ !intel_context_is_pinned(ce);
+}
+
+static void guc_context_unblock(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+ struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+ intel_wakeref_t wakeref;
+ bool enable;
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ if (unlikely(submission_disabled(guc) ||
+ context_cant_unblock(ce))) {
+ enable = false;
+ } else {
+ enable = true;
+ set_context_pending_enable(ce);
+ set_context_enabled(ce);
+ intel_context_get(ce);
+ }
+
+ decr_context_blocked(ce);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ if (enable) {
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_enable(guc, ce);
+ }
+}
+
+static void guc_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_context *block_context =
+ request_to_scheduling_context(rq);
+
+ if (i915_sw_fence_signaled(&rq->submit)) {
+ struct i915_sw_fence *fence;
+
+ intel_context_get(ce);
+ fence = guc_context_block(block_context);
+ i915_sw_fence_wait(fence);
+ if (!i915_request_completed(rq)) {
+ __i915_request_skip(rq);
+ guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
+ true);
+ }
+
+ guc_context_unblock(block_context);
+ intel_context_put(ce);
+ }
+}
+
+static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
+ u16 guc_id,
+ u32 preemption_timeout)
+{
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ struct context_policy policy;
+
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+ guc_id,
+ preemption_timeout
+ };
+
+ intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
+}
+
+static void
+guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
+ unsigned int preempt_timeout_ms)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ struct intel_runtime_pm *runtime_pm =
+ &ce->engine->gt->i915->runtime_pm;
+ intel_wakeref_t wakeref;
+ unsigned long flags;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ guc_flush_submissions(guc);
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ set_context_banned(ce);
+
+ if (submission_disabled(guc) ||
+ (!context_enabled(ce) && !context_pending_disable(ce))) {
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ guc_cancel_context_requests(ce);
+ intel_engine_signal_breadcrumbs(ce->engine);
+ } else if (!context_pending_disable(ce)) {
+ u16 guc_id;
+
+ /*
+ * We add +2 here as the schedule disable complete CTB handler
+ * calls intel_context_sched_disable_unpin (-2 to pin_count).
+ */
+ atomic_add(2, &ce->pin_count);
+
+ guc_id = prep_context_pending_disable(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ /*
+ * In addition to disabling scheduling, set the preemption
+ * timeout to the minimum value (1 us) so the banned context
+ * gets kicked off the HW ASAP.
+ */
+ with_intel_runtime_pm(runtime_pm, wakeref) {
+ __guc_context_set_preemption_timeout(guc, guc_id,
+ preempt_timeout_ms);
+ __guc_context_sched_disable(guc, ce, guc_id);
+ }
+ } else {
+ if (!context_guc_id_invalid(ce))
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_set_preemption_timeout(guc,
+ ce->guc_id.id,
+ preempt_timeout_ms);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ }
+}
+
+static void guc_context_sched_disable(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+ struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
+ intel_wakeref_t wakeref;
+ u16 guc_id;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ /*
+	 * We have to check if the context has been disabled by another thread,
+	 * check if submission has been disabled to seal a race with reset and
+	 * finally check if any more requests have been committed to the
+	 * context, ensuring that a request doesn't slip through the
+	 * 'context_pending_disable' fence.
+ */
+ if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
+ context_has_committed_requests(ce))) {
+ clr_context_enabled(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ goto unpin;
+ }
+ guc_id = prep_context_pending_disable(ce);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_disable(guc, ce, guc_id);
+
+ return;
+unpin:
+ intel_context_sched_disable_unpin(ce);
+}
+
+static inline void guc_lrc_desc_unpin(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ struct intel_gt *gt = guc_to_gt(guc);
+ unsigned long flags;
+ bool disabled;
+
+ GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
+ GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
+ GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
+ GEM_BUG_ON(context_enabled(ce));
+
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
+ __intel_gt_pm_get(gt);
+ set_context_destroyed(ce);
+ clr_context_registered(ce);
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ return;
+ }
+
+ deregister_context(ce, ce->guc_id.id);
+}
+
+static void __guc_context_destroy(struct intel_context *ce)
+{
+ GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.number_committed_requests);
+
+ lrc_fini(ce);
+ intel_context_fini(ce);
+
+ if (intel_engine_is_virtual(ce->engine)) {
+ struct guc_virtual_engine *ve =
+ container_of(ce, typeof(*ve), context);
+
+ if (ve->base.breadcrumbs)
+ intel_breadcrumbs_put(ve->base.breadcrumbs);
+
+ kfree(ve);
+ } else {
+ intel_context_free(ce);
+ }
+}
+
+static void guc_flush_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce;
+ unsigned long flags;
+
+ GEM_BUG_ON(!submission_disabled(guc) &&
+ guc_submission_initialized(guc));
+
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ }
+}
+
+static void deregister_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce;
+ unsigned long flags;
+
+ while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+ struct intel_context,
+ destroyed_link);
+ if (ce)
+ list_del_init(&ce->destroyed_link);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (!ce)
+ break;
+
+ guc_lrc_desc_unpin(ce);
+ }
+}
+
+static void destroyed_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.destroyed_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ int tmp;
+
+ with_intel_gt_pm(gt, tmp)
+ deregister_destroyed_contexts(guc);
+}
+
+static void guc_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+ bool destroy;
+
+ /*
+ * If the guc_id is invalid this context has been stolen and we can free
+ * it immediately. It can also be freed immediately if the context is
+ * not registered with the GuC or the GuC is in the middle of a reset.
+ */
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
+ !ctx_id_mapped(guc, ce->guc_id.id);
+ if (likely(!destroy)) {
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ list_add_tail(&ce->destroyed_link,
+ &guc->submission_state.destroyed_contexts);
+ } else {
+ __release_guc_id(guc, ce);
+ }
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+ if (unlikely(destroy)) {
+ __guc_context_destroy(ce);
+ return;
+ }
+
+ /*
+ * We use a worker to issue the H2G to deregister the context as we may
+ * be taking the GT PM for the first time, which isn't allowed from an
+ * atomic context.
+ */
+ queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
+}
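+
+/*
+ * In short (an illustrative summary of the paths above, not an additional
+ * rule): the final context reference either frees everything immediately
+ * (stolen guc_id, context not registered, or GuC reset in progress) or parks
+ * the context on destroyed_contexts so destroyed_worker_func() can take GT PM
+ * and issue the deregistration H2G from process context.
+ */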
+
+static int guc_context_alloc(struct intel_context *ce)
+{
+ return lrc_alloc(ce, ce->engine);
+}
+
+static void __guc_context_set_prio(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ struct context_policy policy;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+ ce->guc_id.id,
+ ce->guc_state.prio,
+ };
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
+}
+
+static void guc_context_set_prio(struct intel_guc *guc,
+ struct intel_context *ce,
+ u8 prio)
+{
+ GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
+ prio > GUC_CLIENT_PRIORITY_NORMAL);
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ if (ce->guc_state.prio == prio || submission_disabled(guc) ||
+ !context_registered(ce)) {
+ ce->guc_state.prio = prio;
+ return;
+ }
+
+ ce->guc_state.prio = prio;
+ __guc_context_set_prio(guc, ce);
+
+ trace_intel_context_set_prio(ce);
+}
+
+static inline u8 map_i915_prio_to_guc_prio(int prio)
+{
+ if (prio == I915_PRIORITY_NORMAL)
+ return GUC_CLIENT_PRIORITY_KMD_NORMAL;
+ else if (prio < I915_PRIORITY_NORMAL)
+ return GUC_CLIENT_PRIORITY_NORMAL;
+ else if (prio < I915_PRIORITY_DISPLAY)
+ return GUC_CLIENT_PRIORITY_HIGH;
+ else
+ return GUC_CLIENT_PRIORITY_KMD_HIGH;
+}
+
+static inline void add_context_inflight_prio(struct intel_context *ce,
+ u8 guc_prio)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
+
+ ++ce->guc_state.prio_count[guc_prio];
+
+ /* Overflow protection */
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
+}
+
+static inline void sub_context_inflight_prio(struct intel_context *ce,
+ u8 guc_prio)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
+
+ /* Underflow protection */
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
+
+ --ce->guc_state.prio_count[guc_prio];
+}
+
+static inline void update_context_prio(struct intel_context *ce)
+{
+ struct intel_guc *guc = &ce->engine->gt->uc.guc;
+ int i;
+
+ BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
+ BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
+
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
+ if (ce->guc_state.prio_count[i]) {
+ guc_context_set_prio(guc, ce, i);
+ break;
+ }
+ }
+}
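+
+/*
+ * Rough worked example of the bookkeeping above (illustrative only): with two
+ * in-flight requests at GUC_CLIENT_PRIORITY_NORMAL and one at
+ * GUC_CLIENT_PRIORITY_HIGH, prio_count[] is non-zero at both indices. Lower
+ * indices are higher priority, so update_context_prio() selects HIGH and the
+ * whole context runs at HIGH until the HIGH request retires, at which point
+ * guc_prio_fini() drops that count and the context falls back to NORMAL.
+ */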
+
+static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
+{
+ /* Lower value is higher priority */
+ return new_guc_prio < old_guc_prio;
+}
+
+static void add_to_context(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+ GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
+
+ spin_lock(&ce->guc_state.lock);
+ list_move_tail(&rq->sched.link, &ce->guc_state.requests);
+
+ if (rq->guc_prio == GUC_PRIO_INIT) {
+ rq->guc_prio = new_guc_prio;
+ add_context_inflight_prio(ce, rq->guc_prio);
+ } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
+ sub_context_inflight_prio(ce, rq->guc_prio);
+ rq->guc_prio = new_guc_prio;
+ add_context_inflight_prio(ce, rq->guc_prio);
+ }
+ update_context_prio(ce);
+
+ spin_unlock(&ce->guc_state.lock);
+}
+
+static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ if (rq->guc_prio != GUC_PRIO_INIT &&
+ rq->guc_prio != GUC_PRIO_FINI) {
+ sub_context_inflight_prio(ce, rq->guc_prio);
+ update_context_prio(ce);
+ }
+ rq->guc_prio = GUC_PRIO_FINI;
+}
+
+static void remove_from_context(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irq(&ce->guc_state.lock);
+
+ list_del_init(&rq->sched.link);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Prevent further __await_execution() registering a cb, then flush */
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+ guc_prio_fini(rq, ce);
+
+ decr_context_committed_requests(ce);
+
+ spin_unlock_irq(&ce->guc_state.lock);
+
+ atomic_dec(&ce->guc_id.ref);
+ i915_request_notify_execute_cb_imm(rq);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .alloc = guc_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_context_pin,
+ .unpin = guc_context_unpin,
+ .post_unpin = guc_context_post_unpin,
+
+ .revoke = guc_context_revoke,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
+ .sched_disable = guc_context_sched_disable,
+
+ .reset = lrc_reset,
+ .destroy = guc_context_destroy,
+
+ .create_virtual = guc_create_virtual,
+ .create_parallel = guc_create_parallel,
+};
+
+static void submit_work_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+ might_lock(&rq->engine->sched_engine->lock);
+ i915_sw_fence_complete(&rq->submit);
+}
+
+static void __guc_signal_context_fence(struct intel_context *ce)
+{
+ struct i915_request *rq, *rn;
+
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ if (!list_empty(&ce->guc_state.fences))
+ trace_intel_context_fence_release(ce);
+
+ /*
+ * Use an IRQ to ensure locking order of sched_engine->lock ->
+ * ce->guc_state.lock is preserved.
+ */
+ list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+ guc_fence_link) {
+ list_del(&rq->guc_fence_link);
+ irq_work_queue(&rq->submit_work);
+ }
+
+ INIT_LIST_HEAD(&ce->guc_state.fences);
+}
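+
+/*
+ * Illustrative note on the irq_work indirection above (a sketch of the
+ * ordering concern, not a new locking rule): completing rq->submit directly
+ * could take sched_engine->lock while ce->guc_state.lock is held, inverting
+ * the expected sched_engine->lock -> guc_state.lock order. Bouncing the
+ * completion through submit_work_cb() keeps the fence release outside the
+ * guc_state.lock critical section.
+ */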
+
+static void guc_signal_context_fence(struct intel_context *ce)
+{
+ unsigned long flags;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ clr_context_wait_for_deregister_to_register(ce);
+ __guc_signal_context_fence(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+}
+
+static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
+{
+ return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
+ !submission_disabled(ce_to_guc(ce));
+}
+
+static void guc_context_init(struct intel_context *ce)
+{
+ const struct i915_gem_context *ctx;
+ int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx)
+ prio = ctx->sched.priority;
+ rcu_read_unlock();
+
+ ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
+ set_bit(CONTEXT_GUC_INIT, &ce->flags);
+}
+
+static int guc_request_alloc(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ rq->reserved_space += GUC_REQUEST_SIZE;
+
+ /*
+ * Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (ret)
+ return ret;
+
+ rq->reserved_space -= GUC_REQUEST_SIZE;
+
+ if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
+ guc_context_init(ce);
+
+ /*
+ * Call pin_guc_id here rather than in the pinning step as, with
+ * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
+ * guc_id and creating horrible race conditions. This is especially bad
+ * when guc_ids are being stolen due to oversubscription. By the time
+ * this function is reached, it is guaranteed that the guc_id will be
+ * persistent until the generated request is retired, which seals these
+ * race conditions. It is still safe to fail here if guc_ids are
+ * exhausted and return -EAGAIN to the user, indicating that they can
+ * try again in the future.
+ *
+ * There is no need for a lock here as the timeline mutex ensures at
+ * most one context can be executing this code path at once. The
+ * guc_id_ref is incremented once for every request in flight and
+ * decremented on each retire. When it is zero, a lock around the
+ * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
+ */
+ if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
+ goto out;
+
+ ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
+ if (unlikely(ret < 0))
+ return ret;
+ if (context_needs_register(ce, !!ret)) {
+ ret = try_context_registration(ce, true);
+ if (unlikely(ret)) { /* unwind */
+ if (ret == -EPIPE) {
+ disable_submission(guc);
+ goto out; /* GPU will be reset */
+ }
+ atomic_dec(&ce->guc_id.ref);
+ unpin_guc_id(guc, ce);
+ return ret;
+ }
+ }
+
+ clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+
+out:
+ /*
+ * We block all requests on this context if a G2H is pending for a
+ * schedule disable or context deregistration as the GuC will fail a
+ * schedule enable or context registration, respectively, if either G2H
+ * is pending. Once the G2H returns, the fence blocking these requests
+ * is released (see guc_signal_context_fence).
+ */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (context_wait_for_deregister_to_register(ce) ||
+ context_pending_disable(ce)) {
+ init_irq_work(&rq->submit_work, submit_work_cb);
+ i915_sw_fence_await(&rq->submit);
+
+ list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
+ }
+ incr_context_committed_requests(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ return 0;
+}
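+
+/*
+ * Rough guc_id lifecycle for the reference taken in guc_request_alloc()
+ * (an illustrative summary using the helpers in this file): either
+ * ce->guc_id.ref is bumped for an id that is already pinned, or pin_guc_id()
+ * assigns (possibly steals) one and the context is (re)registered if needed;
+ * remove_from_context() drops the reference at retire, and unpin_guc_id() at
+ * context unpin returns an unreferenced id to the idle list, where it becomes
+ * eligible for stealing again.
+ */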
+
+static int guc_virtual_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+ return __guc_context_pre_pin(ce, engine, ww, vaddr);
+}
+
+static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ int ret = __guc_context_pin(ce, engine, vaddr);
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+
+ if (likely(!ret))
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_get(engine);
+
+ return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+ struct intel_engine_cs *engine;
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_put_async(engine);
+}
+
+static void guc_virtual_context_enter(struct intel_context *ce)
+{
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+ struct intel_engine_cs *engine;
+
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_get(engine);
+
+ intel_timeline_enter(ce->timeline);
+}
+
+static void guc_virtual_context_exit(struct intel_context *ce)
+{
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+ struct intel_engine_cs *engine;
+
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_put(engine);
+
+ intel_timeline_exit(ce->timeline);
+}
+
+static int guc_virtual_context_alloc(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+ return lrc_alloc(ce, engine);
+}
+
+static const struct intel_context_ops virtual_guc_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_virtual_context_pre_pin,
+ .pin = guc_virtual_context_pin,
+ .unpin = guc_virtual_context_unpin,
+ .post_unpin = guc_context_post_unpin,
+
+ .revoke = guc_context_revoke,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .sched_disable = guc_context_sched_disable,
+
+ .destroy = guc_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ struct intel_guc *guc = ce_to_guc(ce);
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ ret = pin_guc_id(guc, ce);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ __intel_context_pin(ce->parallel.parent);
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static void guc_parent_context_unpin(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_post_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_post_unpin(ce);
+ intel_context_unpin(ce->parallel.parent);
+}
+
+static void guc_child_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ __guc_context_destroy(ce);
+}
+
+static const struct intel_context_ops virtual_parent_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_parent_context_pin,
+ .unpin = guc_parent_context_unpin,
+ .post_unpin = guc_context_post_unpin,
+
+ .revoke = guc_context_revoke,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .sched_disable = guc_context_sched_disable,
+
+ .destroy = guc_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+static const struct intel_context_ops virtual_child_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_child_context_pin,
+ .unpin = guc_child_context_unpin,
+ .post_unpin = guc_child_context_post_unpin,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .destroy = guc_child_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+/*
+ * The below override of the breadcrumbs is enabled when the user configures a
+ * context for parallel submission (multi-lrc, parent-child).
+ *
+ * The overridden breadcrumbs implement an algorithm which allows the GuC to
+ * safely preempt all the hw contexts configured for parallel submission
+ * between each BB. The contract between the i915 and the GuC is that if the
+ * parent context can be preempted, all the children can be preempted, and the
+ * GuC will always try to preempt the parent before the children. A handshake
+ * between the parent / children breadcrumbs ensures the i915 holds up its end
+ * of the deal, creating a window to preempt between each set of BBs.
+ */
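+/*
+ * As an illustrative sketch of that handshake (a summary of the emitters
+ * below, not an additional contract):
+ *
+ *   parent BB start: wait for each child's join semaphore (PARENT_GO_BB),
+ *                    disable arbitration, write CHILD_GO_BB to the go
+ *                    semaphore, jump to the parent batch
+ *   child BB start:  signal its join semaphore, wait for CHILD_GO_BB,
+ *                    disable arbitration, jump to the child batch
+ *   child fini:      re-enable arbitration, signal its join semaphore with
+ *                    PARENT_GO_FINI_BREADCRUMB, wait for
+ *                    CHILD_GO_FINI_BREADCRUMB
+ *   parent fini:     wait for all joins, re-enable arbitration, write
+ *                    CHILD_GO_FINI_BREADCRUMB, then emit its seqno
+ *
+ * Preemption is therefore only possible outside the arbitration-disabled
+ * window, i.e. between one set of BBs and the next.
+ */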
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_engine_cs **siblings = NULL;
+ struct intel_context *parent = NULL, *ce, *err;
+ int i, j;
+
+ siblings = kmalloc_array(num_siblings,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < width; ++i) {
+ for (j = 0; j < num_siblings; ++j)
+ siblings[j] = engines[i * num_siblings + j];
+
+ ce = intel_engine_create_virtual(siblings, num_siblings,
+ FORCE_VIRTUAL);
+ if (IS_ERR(ce)) {
+ err = ERR_CAST(ce);
+ goto unwind;
+ }
+
+ if (i == 0) {
+ parent = ce;
+ parent->ops = &virtual_parent_context_ops;
+ } else {
+ ce->ops = &virtual_child_context_ops;
+ intel_context_bind_parent_child(parent, ce);
+ }
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ parent->engine->emit_bb_start =
+ emit_bb_start_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb_dw =
+ 12 + 4 * parent->parallel.number_children;
+ for_each_child(parent, ce) {
+ ce->engine->emit_bb_start =
+ emit_bb_start_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb_dw = 16;
+ }
+
+ kfree(siblings);
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ kfree(siblings);
+ return err;
+}
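+
+/*
+ * Back-of-the-envelope check for the emit_fini_breadcrumb_dw values above
+ * (derived from the emitters at the end of this file): the parent fini
+ * breadcrumb is one 4-dword semaphore wait per child, 2 dwords of arbitration
+ * toggle, a 4-dword GGTT "go" write, a 4-dword seqno write and
+ * MI_USER_INTERRUPT + MI_NOOP, i.e. 12 + 4 * number_children dwords; the
+ * child emits 2 + 4 + 4 dwords of handshake plus the same 6-dword tail,
+ * i.e. 16 dwords.
+ */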
+
+static bool
+guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *sibling;
+ intel_engine_mask_t tmp, mask = b->engine_mask;
+ bool result = false;
+
+ for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
+ result |= intel_engine_irq_enable(sibling);
+
+ return result;
+}
+
+static void
+guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *sibling;
+ intel_engine_mask_t tmp, mask = b->engine_mask;
+
+ for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
+ intel_engine_irq_disable(sibling);
+}
+
+static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
+{
+ int i;
+
+ /*
+ * In GuC submission mode we do not know which physical engine a request
+ * will be scheduled on, which is a problem because the breadcrumb
+ * interrupt is per physical engine. To work around this we attach
+ * requests to, and direct all breadcrumb interrupts at, the first
+ * instance of an engine per class. In addition all breadcrumb interrupts
+ * are enabled / disabled across an engine class in unison.
+ */
+ for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
+ struct intel_engine_cs *sibling =
+ engine->gt->engine_class[engine->class][i];
+
+ if (sibling) {
+ if (engine->breadcrumbs != sibling->breadcrumbs) {
+ intel_breadcrumbs_put(engine->breadcrumbs);
+ engine->breadcrumbs =
+ intel_breadcrumbs_get(sibling->breadcrumbs);
+ }
+ break;
+ }
+ }
+
+ if (engine->breadcrumbs) {
+ engine->breadcrumbs->engine_mask |= engine->mask;
+ engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
+ engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
+ }
+}
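+
+/*
+ * For example (illustrative, engine names depend on the platform): with four
+ * video engines vcs0-vcs3 in a class, vcs1-vcs3 drop their own breadcrumbs
+ * object and take a reference on vcs0's, vcs0's breadcrumbs engine_mask then
+ * covers all four instances, and the irq_enable/irq_disable hooks above fan
+ * each enable/disable out to every sibling.
+ */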
+
+static void guc_bump_inflight_request_prio(struct i915_request *rq,
+ int prio)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
+
+ /* Short circuit function */
+ if (prio < I915_PRIORITY_NORMAL ||
+ rq->guc_prio == GUC_PRIO_FINI ||
+ (rq->guc_prio != GUC_PRIO_INIT &&
+ !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+ return;
+
+ spin_lock(&ce->guc_state.lock);
+ if (rq->guc_prio != GUC_PRIO_FINI) {
+ if (rq->guc_prio != GUC_PRIO_INIT)
+ sub_context_inflight_prio(ce, rq->guc_prio);
+ rq->guc_prio = new_guc_prio;
+ add_context_inflight_prio(ce, rq->guc_prio);
+ update_context_prio(ce);
+ }
+ spin_unlock(&ce->guc_state.lock);
+}
+
+static void guc_retire_inflight_request_prio(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ spin_lock(&ce->guc_state.lock);
+ guc_prio_fini(rq, ce);
+ spin_unlock(&ce->guc_state.lock);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void guc_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */
+ sanitize_hwsp(engine);
+
+ /* And scrub the dirty cachelines for the HWSP */
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
+}
+
+static void setup_hwsp(struct intel_engine_cs *engine)
+{
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+}
+
+static void start_engine(struct intel_engine_cs *engine)
+{
+ ENGINE_WRITE_FW(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+}
+
+static int guc_resume(struct intel_engine_cs *engine)
+{
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+ intel_mocs_init_engine(engine);
+
+ intel_breadcrumbs_reset(engine->breadcrumbs);
+
+ setup_hwsp(engine);
+ start_engine(engine);
+
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ xehp_enable_ccs_engines(engine);
+
+ return 0;
+}
+
+static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
+{
+ return !sched_engine->tasklet.callback;
+}
+
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = guc_submit_request;
+}
+
+static inline void guc_kernel_context_pin(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ /*
+ * Note: we purposefully do not check the returns below because
+ * the registration can only fail if a reset is just starting.
+ * This is called at the end of reset so presumably another reset
+ * isn't happening and even if it did this code would be run again.
+ */
+
+ if (context_guc_id_invalid(ce))
+ pin_guc_id(guc, ce);
+
+ try_context_registration(ce, true);
+}
+
+static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* make sure all descriptors are clean... */
+ xa_destroy(&guc->context_lookup);
+
+ /*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
+ * Some contexts might have been pinned before we enabled GuC
+ * submission, so we need to add them to the GuC bookkeeping.
+ * Also, after a reset of the GuC we want to make sure that the
+ * information shared with the GuC is properly reset. The kernel LRCs
+ * are not attached to the gem_context, so they need to be added
+ * separately.
+ */
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link)
+ guc_kernel_context_pin(guc, ce);
+ }
+}
+
+static void guc_release(struct intel_engine_cs *engine)
+{
+ engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+ intel_engine_cleanup_common(engine);
+ lrc_fini_wa_ctx(engine);
+}
+
+static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
+{
+ struct intel_engine_cs *e;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(e, engine->gt, mask, tmp)
+ e->serial++;
+}
+
+static void guc_default_vfuncs(struct intel_engine_cs *engine)
+{
+ /* Default vfuncs which can be overridden by each engine. */
+
+ engine->resume = guc_resume;
+
+ engine->cops = &guc_context_ops;
+ engine->request_alloc = guc_request_alloc;
+ engine->add_active_request = add_to_context;
+ engine->remove_active_request = remove_from_context;
+
+ engine->sched_engine->schedule = i915_schedule;
+
+ engine->reset.prepare = guc_engine_reset_prepare;
+ engine->reset.rewind = guc_rewind_nop;
+ engine->reset.cancel = guc_reset_nop;
+ engine->reset.finish = guc_reset_nop;
+
+ engine->emit_flush = gen8_emit_flush_xcs;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+ engine->emit_flush = gen12_emit_flush_xcs;
+ }
+ engine->set_default_submission = guc_set_default_submission;
+ engine->busyness = guc_engine_busyness;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ engine->emit_bb_start = gen125_emit_bb_start;
+}
+
+static void rcs_submission_override(struct intel_engine_cs *engine)
+{
+ switch (GRAPHICS_VER(engine->i915)) {
+ case 12:
+ engine->emit_flush = gen12_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
+ case 11:
+ engine->emit_flush = gen11_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_rcs;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+
+static inline void guc_default_irqs(struct intel_engine_cs *engine)
+{
+ engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+ intel_engine_set_irq_handler(engine, cs_irq_handler);
+}
+
+static void guc_sched_engine_destroy(struct kref *kref)
+{
+ struct i915_sched_engine *sched_engine =
+ container_of(kref, typeof(*sched_engine), ref);
+ struct intel_guc *guc = sched_engine->private_data;
+
+ guc->sched_engine = NULL;
+ tasklet_kill(&sched_engine->tasklet); /* flush the callback */
+ kfree(sched_engine);
+}
+
+int intel_guc_submission_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+
+ /*
+ * The setup relies on several assumptions (e.g. irqs always enabled)
+ * that are only valid on gen11+
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
+
+ if (!guc->sched_engine) {
+ guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
+ if (!guc->sched_engine)
+ return -ENOMEM;
+
+ guc->sched_engine->schedule = i915_schedule;
+ guc->sched_engine->disabled = guc_sched_engine_disabled;
+ guc->sched_engine->private_data = guc;
+ guc->sched_engine->destroy = guc_sched_engine_destroy;
+ guc->sched_engine->bump_inflight_request_prio =
+ guc_bump_inflight_request_prio;
+ guc->sched_engine->retire_inflight_request_prio =
+ guc_retire_inflight_request_prio;
+ tasklet_setup(&guc->sched_engine->tasklet,
+ guc_submission_tasklet);
+ }
+ i915_sched_engine_put(engine->sched_engine);
+ engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
+
+ guc_default_vfuncs(engine);
+ guc_default_irqs(engine);
+ guc_init_breadcrumbs(engine);
+
+ if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
+ rcs_submission_override(engine);
+
+ lrc_init_wa_ctx(engine);
+
+ /* Finally, take ownership and responsibility for cleanup! */
+ engine->sanitize = guc_sanitize;
+ engine->release = guc_release;
+
+ return 0;
+}
+
+void intel_guc_submission_enable(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /* Enable and route to GuC */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
+ GUC_SEM_INTR_ROUTE_TO_GUC |
+ GUC_SEM_INTR_ENABLE_ALL);
+
+ guc_init_lrc_mapping(guc);
+ guc_init_engine_stats(guc);
+}
+
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /* Note: By the time we're here, GuC may have already been reset */
+
+ /* Disable and route to host */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
+}
+
+static bool __guc_submission_supported(struct intel_guc *guc)
+{
+ /* GuC submission is unavailable for pre-Gen11 */
+ return intel_guc_is_supported(guc) &&
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
+}
+
+static bool __guc_submission_selected(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (!intel_guc_submission_is_supported(guc))
+ return false;
+
+ return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+void intel_guc_submission_init_early(struct intel_guc *guc)
+{
+ xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
+
+ spin_lock_init(&guc->submission_state.lock);
+ INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
+ ida_init(&guc->submission_state.guc_ids);
+ INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
+ INIT_WORK(&guc->submission_state.destroyed_worker,
+ destroyed_worker_func);
+ INIT_WORK(&guc->submission_state.reset_fail_worker,
+ reset_fail_worker_func);
+
+ spin_lock_init(&guc->timestamp.lock);
+ INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+
+ guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
+ guc->submission_supported = __guc_submission_supported(guc);
+ guc->submission_selected = __guc_submission_selected(guc);
+}
+
+static inline struct intel_context *
+g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
+{
+ struct intel_context *ce;
+
+ if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Invalid ctx_id %u\n", ctx_id);
+ return NULL;
+ }
+
+ ce = __get_context(guc, ctx_id);
+ if (unlikely(!ce)) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Context is NULL, ctx_id %u\n", ctx_id);
+ return NULL;
+ }
+
+ if (unlikely(intel_context_is_child(ce))) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Context is child, ctx_id %u\n", ctx_id);
+ return NULL;
+ }
+
+ return ce;
+}
+
+int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
+ const u32 *msg,
+ u32 len)
+{
+ struct intel_context *ce;
+ u32 ctx_id;
+
+ if (unlikely(len < 1)) {
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ return -EPROTO;
+ }
+ ctx_id = msg[0];
+
+ ce = g2h_context_lookup(guc, ctx_id);
+ if (unlikely(!ce))
+ return -EPROTO;
+
+ trace_intel_context_deregister_done(ce);
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_deregister)) {
+ ce->drop_deregister = false;
+ return 0;
+ }
+#endif
+
+ if (context_wait_for_deregister_to_register(ce)) {
+ struct intel_runtime_pm *runtime_pm =
+ &ce->engine->gt->i915->runtime_pm;
+ intel_wakeref_t wakeref;
+
+ /*
+ * The previous owner of this guc_id has been deregistered, so it
+ * is now safe to register this context.
+ */
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ register_context(ce, true);
+ guc_signal_context_fence(ce);
+ intel_context_put(ce);
+ } else if (context_destroyed(ce)) {
+ /* Context has been destroyed */
+ intel_gt_pm_put_async(guc_to_gt(guc));
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ }
+
+ decr_outstanding_submission_g2h(guc);
+
+ return 0;
+}
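+
+/*
+ * Rough summary of the two interesting deregister-done cases above
+ * (illustrative, no extra protocol implied): if the context was waiting for a
+ * stolen guc_id's previous owner to be deregistered, the new owner is now
+ * registered and its blocked requests are released via
+ * guc_signal_context_fence(); if instead the context itself was being torn
+ * down, the GT PM reference taken in guc_lrc_desc_unpin() is dropped and the
+ * guc_id and backing memory are freed.
+ */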
+
+int intel_guc_sched_done_process_msg(struct intel_guc *guc,
+ const u32 *msg,
+ u32 len)
+{
+ struct intel_context *ce;
+ unsigned long flags;
+ u32 ctx_id;
+
+ if (unlikely(len < 2)) {
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ return -EPROTO;
+ }
+ ctx_id = msg[0];
+
+ ce = g2h_context_lookup(guc, ctx_id);
+ if (unlikely(!ce))
+ return -EPROTO;
+
+ if (unlikely(context_destroyed(ce) ||
+ (!context_pending_enable(ce) &&
+ !context_pending_disable(ce)))) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Bad context sched_state 0x%x, ctx_id %u\n",
+ ce->guc_state.sched_state, ctx_id);
+ return -EPROTO;
+ }
+
+ trace_intel_context_sched_done(ce);
+
+ if (context_pending_enable(ce)) {
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_enable)) {
+ ce->drop_schedule_enable = false;
+ return 0;
+ }
+#endif
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ clr_context_pending_enable(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ } else if (context_pending_disable(ce)) {
+ bool banned;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_disable)) {
+ ce->drop_schedule_disable = false;
+ return 0;
+ }
+#endif
+
+ /*
+ * Unpin must be done before __guc_signal_context_fence,
+ * otherwise there is a race where requests are submitted and
+ * retired before this unpin completes, resulting in the
+ * pin_count going to zero while the context is still enabled.
+ */
+ intel_context_sched_disable_unpin(ce);
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ banned = context_banned(ce);
+ clr_context_banned(ce);
+ clr_context_pending_disable(ce);
+ __guc_signal_context_fence(ce);
+ guc_blocked_fence_complete(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ if (banned) {
+ guc_cancel_context_requests(ce);
+ intel_engine_signal_breadcrumbs(ce->engine);
+ }
+ }
+
+ decr_outstanding_submission_g2h(guc);
+ intel_context_put(ce);
+
+ return 0;
+}
+
+static void capture_error_state(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine = __context_to_physical_engine(ce);
+ intel_wakeref_t wakeref;
+
+ intel_engine_set_hung_context(engine, ce);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
+ atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
+}
+
+static void guc_context_replay(struct intel_context *ce)
+{
+ struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+
+ __guc_reset_context(ce, ce->engine->mask);
+ tasklet_hi_schedule(&sched_engine->tasklet);
+}
+
+static void guc_handle_context_reset(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ trace_intel_context_reset(ce);
+
+ if (likely(intel_context_is_schedulable(ce))) {
+ capture_error_state(guc, ce);
+ guc_context_replay(ce);
+ } else {
+ drm_info(&guc_to_gt(guc)->i915->drm,
+ "Ignoring context reset notification of exiting context 0x%04X on %s",
+ ce->guc_id.id, ce->engine->name);
+ }
+}
+
+int intel_guc_context_reset_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len)
+{
+ struct intel_context *ce;
+ unsigned long flags;
+ int ctx_id;
+
+ if (unlikely(len != 1)) {
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ ctx_id = msg[0];
+
+ /*
+ * The context lookup uses the xarray but lookups only require the RCU
+ * read lock, not the full spinlock. So take the lock explicitly and keep
+ * it until a reference to the context has been taken, to ensure it can't
+ * be destroyed asynchronously before the reset is done.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ ce = g2h_context_lookup(guc, ctx_id);
+ if (ce)
+ intel_context_get(ce);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
+ if (unlikely(!ce))
+ return -EPROTO;
+
+ guc_handle_context_reset(guc, ce);
+ intel_context_put(ce);
+
+ return 0;
+}
+
+int intel_guc_error_capture_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len)
+{
+ u32 status;
+
+ if (unlikely(len != 1)) {
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
+
+ intel_guc_capture_process(guc);
+
+ return 0;
+}
+
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u8 engine_class = guc_class_to_engine_class(guc_class);
+
+ /* Class index is checked in class converter */
+ GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
+
+ return gt->engine_class[engine_class][instance];
+}
+
+static void reset_fail_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.reset_fail_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_engine_mask_t reset_fail_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ reset_fail_mask = guc->submission_state.reset_fail_mask;
+ guc->submission_state.reset_fail_mask = 0;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ if (likely(reset_fail_mask))
+ intel_gt_handle_error(gt, reset_fail_mask,
+ I915_ERROR_CAPTURE,
+ "GuC failed to reset engine mask=0x%x\n",
+ reset_fail_mask);
+}
+
+int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
+ const u32 *msg, u32 len)
+{
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt = guc_to_gt(guc);
+ u8 guc_class, instance;
+ u32 reason;
+ unsigned long flags;
+
+ if (unlikely(len != 3)) {
+ drm_err(&gt->i915->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ guc_class = msg[0];
+ instance = msg[1];
+ reason = msg[2];
+
+ engine = intel_guc_lookup_engine(guc, guc_class, instance);
+ if (unlikely(!engine)) {
+ drm_err(&gt->i915->drm,
+ "Invalid engine %d:%d", guc_class, instance);
+ return -EPROTO;
+ }
+
+ /*
+ * This is an unexpected failure of a hardware feature. So, log a real
+ * error message, not just the informational message that comes with
+ * the reset.
+ */
+ drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+ guc_class, instance, engine->name, reason);
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ guc->submission_state.reset_fail_mask |= engine->mask;
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+ /*
+ * A GT reset flushes this worker queue (G2H handler) so we must use
+ * another worker to trigger a GT reset.
+ */
+ queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
+
+ return 0;
+}
+
+void intel_guc_find_hung_context(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long index;
+ unsigned long flags;
+
+ /* Reset called during driver load? GuC not yet initialised! */
+ if (unlikely(!guc_submission_initialized(guc)))
+ return;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ bool found;
+
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
+ if (intel_engine_is_virtual(ce->engine)) {
+ if (!(ce->engine->mask & engine->mask))
+ goto next;
+ } else {
+ if (ce->engine != engine)
+ goto next;
+ }
+
+ found = false;
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
+ if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
+ continue;
+
+ found = true;
+ break;
+ }
+ spin_unlock(&ce->guc_state.lock);
+
+ if (found) {
+ intel_engine_set_hung_context(engine, ce);
+
+ /* Can only cope with one hang at a time... */
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ goto done;
+ }
+
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ }
+done:
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_context *ce;
+ unsigned long index;
+ unsigned long flags;
+
+ /* Reset called during driver load? GuC not yet initialised! */
+ if (unlikely(!guc_submission_initialized(guc)))
+ return;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
+ if (intel_engine_is_virtual(ce->engine)) {
+ if (!(ce->engine->mask & engine->mask))
+ goto next;
+ } else {
+ if (ce->engine != engine)
+ goto next;
+ }
+
+ spin_lock(&ce->guc_state.lock);
+ intel_engine_dump_active_requests(&ce->guc_state.requests,
+ hung_rq, m);
+ spin_unlock(&ce->guc_state.lock);
+
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+void intel_guc_submission_print_info(struct intel_guc *guc,
+ struct drm_printer *p)
+{
+ struct i915_sched_engine *sched_engine = guc->sched_engine;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ if (!sched_engine)
+ return;
+
+ drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
+ atomic_read(&guc->outstanding_submission_g2h));
+ drm_printf(p, "GuC tasklet count: %u\n\n",
+ atomic_read(&sched_engine->tasklet.count));
+
+ spin_lock_irqsave(&sched_engine->lock, flags);
+ drm_printf(p, "Requests in GuC submit tasklet:\n");
+ for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *pl = to_priolist(rb);
+ struct i915_request *rq;
+
+ priolist_for_each_request(rq, pl)
+ drm_printf(p, "guc_id=%u, seqno=%llu\n",
+ rq->context->guc_id.id,
+ rq->fence.seqno);
+ }
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+ drm_printf(p, "\n");
+}
+
+static inline void guc_log_context_priority(struct drm_printer *p,
+ struct intel_context *ce)
+{
+ int i;
+
+ drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
+ drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
+ for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
+ i < GUC_CLIENT_PRIORITY_NUM; ++i) {
+ drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
+ i, ce->guc_state.prio_count[i]);
+ }
+ drm_printf(p, "\n");
+}
+
+static inline void guc_log_context(struct drm_printer *p,
+ struct intel_context *ce)
+{
+ drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ drm_printf(p, "\t\tContext Pin Count: %u\n",
+ atomic_read(&ce->pin_count));
+ drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+ atomic_read(&ce->guc_id.ref));
+ drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
+ ce->guc_state.sched_state);
+}
+
+void intel_guc_submission_print_context_info(struct intel_guc *guc,
+ struct drm_printer *p)
+{
+ struct intel_context *ce;
+ unsigned long index;
+ unsigned long flags;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ guc_log_context(p, ce);
+ guc_log_context_priority(p, ce);
+
+ if (intel_context_is_parent(ce)) {
+ struct intel_context *child;
+
+ drm_printf(p, "\t\tNumber children: %u\n",
+ ce->parallel.number_children);
+
+ if (ce->parallel.guc.wq_status) {
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(*ce->parallel.guc.wq_status));
+ }
+
+ if (ce->engine->emit_bb_start ==
+ emit_bb_start_parent_no_preempt_mid_batch) {
+ u8 i;
+
+ drm_printf(p, "\t\tChildren Go: %u\n\n",
+ get_children_go_value(ce));
+ for (i = 0; i < ce->parallel.number_children; ++i)
+ drm_printf(p, "\t\tChildren Join: %u\n",
+ get_children_join_value(ce, i));
+ }
+
+ for_each_child(ce, child)
+ guc_log_context(p, child);
+ }
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static inline u32 get_children_go_addr(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, go.semaphore);
+}
+
+static inline u32 get_children_join_addr(struct intel_context *ce,
+ u8 child_index)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, join[child_index].semaphore);
+}
+
+#define PARENT_GO_BB 1
+#define PARENT_GO_FINI_BREADCRUMB 0
+#define CHILD_GO_BB 1
+#define CHILD_GO_FINI_BREADCRUMB 0
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ u32 *cs;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_BB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_BB,
+ get_children_go_addr(ce),
+ 0);
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+ u32 *cs;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_BB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+ /* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_BB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *
+__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_FINI_BREADCRUMB,
+ get_children_go_addr(ce),
+ 0);
+
+ return cs;
+}
+
+/*
+ * If this is true, a submission of multi-lrc requests had an error and the
+ * requests need to be skipped. The front end (execbuf IOCTL) should've called
+ * i915_request_skip, which squashes the BB, but we still need to emit the fini
+ * breadcrumb seqno write. At this point we don't know how many of the
+ * requests in the multi-lrc submission were generated, so we can't do the
+ * handshake between the parent and children (e.g. if 4 requests should be
+ * generated but the 2nd hit an error, only 1 would be seen by the GuC
+ * backend). Simply skip the handshake, but still emit the breadcrumb seqno,
+ * if an error has occurred on any of the requests in the submission /
+ * relationship.
+ */
+static inline bool skip_handshake(struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
+}
+
+#define NON_SKIP_LEN 6
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+ * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch;
+ * the NON_SKIP_LEN comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
+ } else {
+ cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
+ }
+
+ /* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
+}
+
+static u32 *
+__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_FINI_BREADCRUMB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+ /* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
+ __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+ * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch;
+ * the NON_SKIP_LEN comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
+ cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
+ } else {
+ cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
+ }
+
+ /* Emit fini breadcrumb */
+ before_fini_breadcrumb_user_interrupt_cs = cs;
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ /* Ensure our math for skip + emit is correct */
+ GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
+ cs);
+ GEM_BUG_ON(start_fini_breadcrumb_cs +
+ ce->engine->emit_fini_breadcrumb_dw != cs);
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
+}
+
+#undef NON_SKIP_LEN
+
+static struct intel_context *
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
+{
+ struct guc_virtual_engine *ve;
+ struct intel_guc *guc;
+ unsigned int n;
+ int err;
+
+ ve = kzalloc(sizeof(*ve), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ guc = &siblings[0]->gt->uc.guc;
+
+ ve->base.i915 = siblings[0]->i915;
+ ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
+ ve->base.id = -1;
+
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.saturated = ALL_ENGINES;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
+
+ ve->base.cops = &virtual_guc_context_ops;
+ ve->base.request_alloc = guc_request_alloc;
+ ve->base.bump_serial = virtual_guc_bump_serial;
+
+ ve->base.submit_request = guc_submit_request;
+
+ ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+
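+ /* The virtual engine starts with a reserved mask bit above all physical engine bits */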
+ BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+ ve->base.mask = VIRTUAL_ENGINES;
+
+ intel_context_init(&ve->context, &ve->base);
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
+
+ if (n != 0 && ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ } else if (n == 0) {
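+ /* Inherit the engine class, emission vfuncs and scheduling properties from the first sibling */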
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.add_active_request =
+ sibling->add_active_request;
+ ve->base.remove_active_request =
+ sibling->remove_active_request;
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb =
+ sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb =
+ sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+ ve->base.breadcrumbs =
+ intel_breadcrumbs_get(sibling->breadcrumbs);
+
+ ve->base.flags |= sibling->flags;
+
+ ve->base.props.timeslice_duration_ms =
+ sibling->props.timeslice_duration_ms;
+ ve->base.props.preempt_timeout_ms =
+ sibling->props.preempt_timeout_ms;
+ }
+ }
+
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
+
+bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
+{
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp, mask = ve->mask;
+
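+ /* The virtual engine has a heartbeat if any of its physical siblings does */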
+ for_each_engine_masked(engine, ve->gt, mask, tmp)
+ if (READ_ONCE(engine->props.heartbeat_interval_ms))
+ return true;
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_guc.c"
+#include "selftest_guc_multi_lrc.c"
+#include "selftest_guc_hangcheck.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
new file mode 100644
index 000000000..5a95a9f0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SUBMISSION_H_
+#define _INTEL_GUC_SUBMISSION_H_
+
+#include <linux/types.h>
+
+#include "intel_guc.h"
+
+struct drm_printer;
+struct intel_engine_cs;
+
+void intel_guc_submission_init_early(struct intel_guc *guc);
+int intel_guc_submission_init(struct intel_guc *guc);
+void intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_disable(struct intel_guc *guc);
+void intel_guc_submission_fini(struct intel_guc *guc);
+int intel_guc_preempt_work_create(struct intel_guc *guc);
+void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+int intel_guc_submission_setup(struct intel_engine_cs *engine);
+void intel_guc_submission_print_info(struct intel_guc *guc,
+ struct drm_printer *p);
+void intel_guc_submission_print_context_info(struct intel_guc *guc,
+ struct drm_printer *p);
+void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
+
+bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
+
+int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
+ atomic_t *wait_var,
+ bool interruptible,
+ long timeout);
+
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+ return guc->submission_supported;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+ return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
new file mode 100644
index 000000000..3bb8838e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include "gt/intel_gt.h"
+#include "intel_guc_reg.h"
+#include "intel_huc.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC
+ *
+ * The HuC is a dedicated microcontroller for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can directly use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * The kernel driver is only responsible for loading the HuC firmware and
+ * triggering its security authentication, which is performed by the GuC on
+ * older platforms and by the GSC on newer ones. For the GuC to correctly
+ * perform the authentication, the HuC binary must be loaded before the GuC one.
+ * Loading the HuC is optional; however, not using the HuC might negatively
+ * impact power usage and/or performance of media workloads, depending on the
+ * use-cases.
+ * HuC must be reloaded on events that cause the WOPCM to lose its contents
+ * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset,
+ * while GSC-managed HuC will survive that.
+ *
+ * See https://github.com/intel/media-driver for the latest details on HuC
+ * functionality.
+ */
+
+/**
+ * DOC: HuC Memory Management
+ *
+ * Similarly to the GuC, the HuC can't do any memory allocations on its own,
+ * with the difference being that the allocations for HuC usage are handled by
+ * the userspace driver instead of the kernel one. The HuC accesses the memory
+ * via the PPGTT belonging to the context loaded on the VCS executing the
+ * HuC-specific commands.
+ */
+
+void intel_huc_init_early(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
+
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+
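+ /* Select the register and bits used to check the HuC authentication status */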
+ if (GRAPHICS_VER(i915) >= 11) {
+ huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
+ huc->status.mask = HUC_LOAD_SUCCESSFUL;
+ huc->status.value = HUC_LOAD_SUCCESSFUL;
+ } else {
+ huc->status.reg = HUC_STATUS2;
+ huc->status.mask = HUC_FW_VERIFIED;
+ huc->status.value = HUC_FW_VERIFIED;
+ }
+}
+
+#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
+static int check_huc_loading_mode(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ bool fw_needs_gsc = intel_huc_is_loaded_by_gsc(huc);
+ bool hw_uses_gsc = false;
+
+ /*
+ * The fuse for HuC load via GSC is only valid on platforms that have
+ * GuC deprivilege.
+ */
+ if (HAS_GUC_DEPRIVILEGE(gt->i915))
+ hw_uses_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
+ GSC_LOADS_HUC;
+
+ if (fw_needs_gsc != hw_uses_gsc) {
+ drm_err(&gt->i915->drm,
+ "mismatch between HuC FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc),
+ HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ return -ENOEXEC;
+ }
+
+ /* make sure we can access the GSC via the mei driver if we need it */
+ if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
+ fw_needs_gsc) {
+ drm_info(&gt->i915->drm,
+ "Can't load HuC due to missing MEI modules\n");
+ return -EIO;
+ }
+
+ drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+
+ return 0;
+}
+
+int intel_huc_init(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
+ int err;
+
+ err = check_huc_loading_mode(huc);
+ if (err)
+ goto out;
+
+ err = intel_uc_fw_init(&huc->fw);
+ if (err)
+ goto out;
+
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out:
+ drm_info(&i915->drm, "HuC init failed with %d\n", err);
+ return err;
+}
+
+void intel_huc_fini(struct intel_huc *huc)
+{
+ if (!intel_uc_fw_is_loadable(&huc->fw))
+ return;
+
+ intel_uc_fw_fini(&huc->fw);
+}
+
+/**
+ * intel_huc_auth() - Authenticate HuC uCode
+ * @huc: intel_huc structure
+ *
+ * Called after HuC and GuC firmware loading during intel_uc_init_hw().
+ *
+ * This function invokes the GuC action to authenticate the HuC firmware,
+ * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
+ * waits for up to 50ms for firmware verification ACK.
+ */
+int intel_huc_auth(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret;
+
+ if (!intel_uc_fw_is_loaded(&huc->fw))
+ return -ENOEXEC;
+
+ /* GSC will do the auth */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (ret)
+ goto fail;
+
+ GEM_BUG_ON(intel_uc_fw_is_running(&huc->fw));
+
+ ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
+ if (ret) {
+ DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ goto fail;
+ }
+
+ /* Check authentication status; it should be done by now */
+ ret = __intel_wait_for_register(gt->uncore,
+ huc->status.reg,
+ huc->status.mask,
+ huc->status.value,
+ 2, 50, NULL);
+ if (ret) {
+ DRM_ERROR("HuC: Firmware not verified %d\n", ret);
+ goto fail;
+ }
+
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ drm_info(&gt->i915->drm, "HuC authenticated\n");
+ return 0;
+
+fail:
+ i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+ return ret;
+}
+
+static bool huc_is_authenticated(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+ u32 status = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
+
+ return (status & huc->status.mask) == huc->status.value;
+}
+
+/**
+ * intel_huc_check_status() - check HuC status
+ * @huc: intel_huc structure
+ *
+ * This function reads the status register to verify whether the HuC
+ * firmware was successfully loaded.
+ *
+ * Returns:
+ * * -ENODEV if HuC is not present on this platform,
+ * * -EOPNOTSUPP if HuC firmware is disabled,
+ * * -ENOPKG if HuC firmware was not installed,
+ * * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * * 0 if HuC firmware is not running,
+ * * 1 if HuC firmware is authenticated and running.
+ */
+int intel_huc_check_status(struct intel_huc *huc)
+{
+ switch (__intel_uc_fw_status(&huc->fw)) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EOPNOTSUPP;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOPKG;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ default:
+ break;
+ }
+
+ return huc_is_authenticated(huc);
+}
+
+void intel_huc_update_auth_status(struct intel_huc *huc)
+{
+ if (!intel_uc_fw_is_loadable(&huc->fw))
+ return;
+
+ if (huc_is_authenticated(huc))
+ intel_uc_fw_change_status(&huc->fw,
+ INTEL_UC_FIRMWARE_RUNNING);
+}
+
+/**
+ * intel_huc_load_status - dump information about HuC load status
+ * @huc: the HuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for HuC load status.
+ */
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ intel_wakeref_t wakeref;
+
+ if (!intel_huc_is_supported(huc)) {
+ drm_printf(p, "HuC not supported\n");
+ return;
+ }
+
+ if (!intel_huc_is_wanted(huc)) {
+ drm_printf(p, "HuC disabled\n");
+ return;
+ }
+
+ intel_uc_fw_dump(&huc->fw, p);
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ drm_printf(p, "HuC status: 0x%08x\n",
+ intel_uncore_read(gt->uncore, huc->status.reg));
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
new file mode 100644
index 000000000..d7e25b6e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "i915_reg_defs.h"
+#include "intel_uc_fw.h"
+#include "intel_huc_fw.h"
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
+ struct {
+ i915_reg_t reg;
+ u32 mask;
+ u32 value;
+ } status;
+};
+
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_check_status(struct intel_huc *huc);
+void intel_huc_update_auth_status(struct intel_huc *huc);
+
+static inline int intel_huc_sanitize(struct intel_huc *huc)
+{
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
+
+static inline bool intel_huc_is_supported(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_supported(&huc->fw);
+}
+
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_enabled(&huc->fw);
+}
+
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&huc->fw);
+}
+
+static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc)
+{
+ return huc->fw.loaded_via_gsc;
+}
+
+void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
new file mode 100644
index 000000000..15998963b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "intel_huc.h"
+#include "intel_huc_debugfs.h"
+
+static int huc_info_show(struct seq_file *m, void *data)
+{
+ struct intel_huc *huc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!intel_huc_is_supported(huc))
+ return -ENODEV;
+
+ intel_huc_load_status(huc, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(huc_info);
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "huc_info", &huc_info_fops, NULL },
+ };
+
+ if (!intel_huc_is_supported(huc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
new file mode 100644
index 000000000..be79e992f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_HUC_H
+#define DEBUGFS_HUC_H
+
+struct intel_huc;
+struct dentry;
+
+void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root);
+
+#endif /* DEBUGFS_HUC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
new file mode 100644
index 000000000..9d6ab1e01
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ if (intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
+ /* HW doesn't look at destination address for HuC, so set it to 0 */
+ return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
new file mode 100644
index 000000000..12f264ee3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_FW_H_
+#define _INTEL_HUC_FW_H_
+
+struct intel_huc;
+
+int intel_huc_fw_upload(struct intel_huc *huc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
new file mode 100644
index 000000000..dbd048b77
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_reset.h"
+#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
+#include "gt/intel_rps.h"
+#include "intel_uc.h"
+
+#include "i915_drv.h"
+
+static const struct intel_uc_ops uc_ops_off;
+static const struct intel_uc_ops uc_ops_on;
+
+static void uc_expand_default_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (i915->params.enable_guc != -1)
+ return;
+
+ /* Don't enable GuC/HuC on pre-Gen12 */
+ if (GRAPHICS_VER(i915) < 12) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Don't enable GuC/HuC on older Gen12 platforms */
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ i915->params.enable_guc = 0;
+ return;
+ }
+
+ /* Intermediate platforms are HuC authentication only */
+ if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+ return;
+ }
+
+ /* Default: enable HuC authentication and GuC submission */
+ i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
+
+ /* XEHPSDV and PVC do not use HuC */
+ if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
+ i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
+}
+
+/* Reset GuC, providing us with fresh state for both GuC and HuC. */
+static int __intel_uc_reset_hw(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ int ret;
+ u32 guc_status;
+
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+
+ return ret;
+}
+
+static void __confirm_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ drm_dbg(&i915->drm,
+ "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
+ i915->params.enable_guc,
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
+
+ if (i915->params.enable_guc == 0) {
+ GEM_BUG_ON(intel_uc_wants_guc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_wants_huc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
+ return;
+ }
+
+ if (!intel_uc_supports_guc(uc))
+ drm_info(&i915->drm,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC is not supported!");
+
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ !intel_uc_supports_huc(uc))
+ drm_info(&i915->drm,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "HuC is not supported!");
+
+ if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
+ !intel_uc_supports_guc_submission(uc))
+ drm_info(&i915->drm,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC submission is N/A");
+
+ if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
+ drm_info(&i915->drm,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "undocumented flag");
+}
+
+void intel_uc_init_early(struct intel_uc *uc)
+{
+ uc_expand_default_options(uc);
+
+ intel_guc_init_early(&uc->guc);
+ intel_huc_init_early(&uc->huc);
+
+ __confirm_options(uc);
+
+ if (intel_uc_wants_guc(uc))
+ uc->ops = &uc_ops_on;
+ else
+ uc->ops = &uc_ops_off;
+}
+
+void intel_uc_init_late(struct intel_uc *uc)
+{
+ intel_guc_init_late(&uc->guc);
+}
+
+void intel_uc_driver_late_release(struct intel_uc *uc)
+{
+}
+
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ * @uc: the intel_uc structure
+ *
+ * Set up minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct intel_uc *uc)
+{
+ intel_guc_init_send_regs(&uc->guc);
+}
+
+static void __uc_capture_load_err_log(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (guc->log.vma && !uc->load_err_log)
+ uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+}
+
+static void __uc_free_load_err_log(struct intel_uc *uc)
+{
+ struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+
+ if (log)
+ i915_gem_object_put(log);
+}
+
+void intel_uc_driver_remove(struct intel_uc *uc)
+{
+ intel_uc_fini_hw(uc);
+ intel_uc_fini(uc);
+ __uc_free_load_err_log(uc);
+}
+
+/*
+ * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
+ * register using the same bits used in the CT message payload. Since our
+ * communication channel with guc is turned off at this point, we can save the
+ * message and handle it after we turn it back on.
+ */
+static void guc_clear_mmio_msg(struct intel_guc *guc)
+{
+ intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
+}
+
+static void guc_get_mmio_msg(struct intel_guc *guc)
+{
+ u32 val;
+
+ spin_lock_irq(&guc->irq_lock);
+
+ val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
+ guc->mmio_msg |= val & guc->msg_enabled_mask;
+
+ /*
+ * clear all events, including the ones we're not currently servicing,
+ * to make sure we don't try to process a stale message if we enable
+ * handling of more events later.
+ */
+ guc_clear_mmio_msg(guc);
+
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_handle_mmio_msg(struct intel_guc *guc)
+{
+ /* we need communication to be enabled to reply to GuC */
+ GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
+
+ spin_lock_irq(&guc->irq_lock);
+ if (guc->mmio_msg) {
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ guc->mmio_msg = 0;
+ }
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
+ int ret;
+
+ GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
+
+ ret = i915_inject_probe_error(i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_guc_ct_enable(&guc->ct);
+ if (ret)
+ return ret;
+
+ /* check for mmio messages received before/during the CT enable */
+ guc_get_mmio_msg(guc);
+ guc_handle_mmio_msg(guc);
+
+ intel_guc_enable_interrupts(guc);
+
+ /* check for CT messages received before we enabled interrupts */
+ spin_lock_irq(gt->irq_lock);
+ intel_guc_ct_event_handler(&guc->ct);
+ spin_unlock_irq(gt->irq_lock);
+
+ drm_dbg(&i915->drm, "GuC communication enabled\n");
+
+ return 0;
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ /*
+	 * Events generated during or after CT disable are logged by the GuC
+	 * via MMIO. Make sure the register is clear before disabling CT since
+ * all events we cared about have already been processed via CT.
+ */
+ guc_clear_mmio_msg(guc);
+
+ intel_guc_disable_interrupts(guc);
+
+ intel_guc_ct_disable(&guc->ct);
+
+ /*
+ * Check for messages received during/after the CT disable. We do not
+ * expect any messages to have arrived via CT between the interrupt
+ * disable and the CT disable because GuC should've been idle until we
+ * triggered the CT disable protocol.
+ */
+ guc_get_mmio_msg(guc);
+
+ drm_dbg(&i915->drm, "GuC communication disabled\n");
+}
+
+static void __uc_fetch_firmwares(struct intel_uc *uc)
+{
+ int err;
+
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ err = intel_uc_fw_fetch(&uc->guc.fw);
+ if (err) {
+ /* Make sure we transition out of transient "SELECTED" state */
+ if (intel_uc_wants_huc(uc)) {
+ drm_dbg(&uc_to_gt(uc)->i915->drm,
+ "Failed to fetch GuC: %d disabling HuC\n", err);
+ intel_uc_fw_change_status(&uc->huc.fw,
+ INTEL_UC_FIRMWARE_ERROR);
+ }
+
+ return;
+ }
+
+ if (intel_uc_wants_huc(uc))
+ intel_uc_fw_fetch(&uc->huc.fw);
+}
+
+static void __uc_cleanup_firmwares(struct intel_uc *uc)
+{
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
+ intel_uc_fw_cleanup_fetch(&uc->guc.fw);
+}
+
+static int __uc_init(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret;
+
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ if (!intel_uc_uses_guc(uc))
+ return 0;
+
+ if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+ return -ENOMEM;
+
+ ret = intel_guc_init(guc);
+ if (ret)
+ return ret;
+
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
+
+ return 0;
+}
+
+static void __uc_fini(struct intel_uc *uc)
+{
+ intel_huc_fini(&uc->huc);
+ intel_guc_fini(&uc->guc);
+}
+
+static int __uc_sanitize(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+
+ intel_huc_sanitize(huc);
+ intel_guc_sanitize(guc);
+
+ return __intel_uc_reset_hw(uc);
+}
+
+/* Initialize and verify the uC regs related to uC positioning in WOPCM */
+static int uc_init_wopcm(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
+ u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
+ u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
+ u32 mask;
+ int err;
+
+ if (unlikely(!base || !size)) {
+ i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ return -E2BIG;
+ }
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
+ err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
+ size | GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
+ base | huc_agent, mask,
+ base | huc_agent |
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+
+ return err;
+}
+
+static bool uc_is_wopcm_locked(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+
+ return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
+ (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
+}
+
+static int __uc_check_hw(struct intel_uc *uc)
+{
+ if (!intel_uc_supports_guc(uc))
+ return 0;
+
+ /*
+	 * We can silently continue without GuC only if it was never enabled
+	 * on this system since the last reboot; otherwise we risk GPU hangs.
+	 * To check whether GuC was loaded before, we look at the WOPCM registers.
+ */
+ if (uc_is_wopcm_locked(uc))
+ return -EIO;
+
+ return 0;
+}
+
+static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.major_ver,
+ fw->file_selected.minor_ver,
+ fw->file_selected.patch_ver);
+}
+
+static int __uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ print_fw_ver(uc, &guc->fw);
+
+ if (intel_uc_uses_huc(uc))
+ print_fw_ver(uc, &huc->fw);
+
+ if (!intel_uc_fw_is_loadable(&guc->fw)) {
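+		/*
+		 * Fall back silently to execlists unless GuC was already in
+		 * use on this system, the firmware was explicitly overridden
+		 * or GuC submission was requested.
+		 */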
+ ret = __uc_check_hw(uc) ||
+ intel_uc_fw_is_overridden(&guc->fw) ||
+ intel_uc_wants_guc_submission(uc) ?
+ intel_uc_fw_status_to_error(guc->fw.status) : 0;
+ goto err_out;
+ }
+
+ ret = uc_init_wopcm(uc);
+ if (ret)
+ goto err_out;
+
+ intel_guc_reset_interrupts(guc);
+
+ /* WaEnableuKernelHeaderValidFix:skl */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+ if (GRAPHICS_VER(i915) == 9)
+ attempts = 3;
+ else
+ attempts = 1;
+
+ intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
+
+ while (attempts--) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ ret = __uc_sanitize(uc);
+ if (ret)
+ goto err_out;
+
+ intel_huc_fw_upload(huc);
+ intel_guc_ads_reset(guc);
+ intel_guc_write_params(guc);
+ ret = intel_guc_fw_upload(guc);
+ if (ret == 0)
+ break;
+
+ DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", ret, attempts);
+ }
+
+	/* Did we succeed or run out of retries? */
+ if (ret)
+ goto err_log_capture;
+
+ ret = guc_enable_communication(guc);
+ if (ret)
+ goto err_log_capture;
+
+ /*
+ * GSC-loaded HuC is authenticated by the GSC, so we don't need to
+ * trigger the auth here. However, given that the HuC loaded this way
+	 * survives GT reset, we still need to update our SW bookkeeping to make
+ * sure it reflects the correct HW status.
+ */
+ if (intel_huc_is_loaded_by_gsc(huc))
+ intel_huc_update_auth_status(huc);
+ else
+ intel_huc_auth(huc);
+
+ if (intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_enable(guc);
+
+ if (intel_uc_uses_guc_slpc(uc)) {
+ ret = intel_guc_slpc_enable(&guc->slpc);
+ if (ret)
+ goto err_submission;
+ } else {
+ /* Restore GT back to RPn for non-SLPC path */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+ }
+
+ drm_info(&i915->drm, "GuC submission %s\n",
+ str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ drm_info(&i915->drm, "GuC SLPC %s\n",
+ str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+
+ return 0;
+
+ /*
+ * We've failed to load the firmware :(
+ */
+err_submission:
+ intel_guc_submission_disable(guc);
+err_log_capture:
+ __uc_capture_load_err_log(uc);
+err_out:
+ /* Return GT back to RPn */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+
+ __uc_sanitize(uc);
+
+ if (!ret) {
+ drm_notice(&i915->drm, "GuC is uninitialized\n");
+ /* We want to run without GuC submission */
+ return 0;
+ }
+
+ i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+
+ /* We want to keep KMS alive */
+ return -EIO;
+}
+
+static void __uc_fini_hw(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_fw_running(guc))
+ return;
+
+ if (intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_disable(guc);
+
+ __uc_sanitize(uc);
+}
+
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @uc: the intel_uc structure
+ *
+ * Prepare for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ uc->reset_in_progress = true;
+
+ /* Nothing to do if GuC isn't supported */
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ /* Firmware expected to be running when this function is called */
+ if (!intel_guc_is_ready(guc))
+ goto sanitize;
+
+ if (intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_reset_prepare(guc);
+
+sanitize:
+ __uc_sanitize(uc);
+}
+
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ /* Firmware can not be running when this function is called */
+ if (intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_reset(guc, stalled);
+}
+
+void intel_uc_reset_finish(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ uc->reset_in_progress = false;
+
+ /* Firmware expected to be running when this function is called */
+ if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_reset_finish(guc);
+}
+
+void intel_uc_cancel_requests(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ /* Firmware can not be running when this function is called */
+ if (intel_uc_uses_guc_submission(uc))
+ intel_guc_submission_cancel_requests(guc);
+}
+
+void intel_uc_runtime_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_ready(guc))
+ return;
+
+ /*
+	 * Wait for any outstanding CTB before tearing down communication with the
+ * GuC.
+ */
+#define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5)
+ intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
+ false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
+ GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+
+ guc_disable_communication(guc);
+}
+
+void intel_uc_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ intel_wakeref_t wakeref;
+ int err;
+
+ if (!intel_guc_is_ready(guc))
+ return;
+
+ with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
+ err = intel_guc_suspend(guc);
+ if (err)
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+ }
+}
+
+static int __uc_resume(struct intel_uc *uc, bool enable_communication)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_gt *gt = guc_to_gt(guc);
+ int err;
+
+ if (!intel_guc_is_fw_running(guc))
+ return 0;
+
+ /* Make sure we enable communication if and only if it's disabled */
+ GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
+
+ if (enable_communication)
+ guc_enable_communication(guc);
+
+	/*
+	 * If we are only resuming GuC communication but not reloading
+ * GuC, we need to ensure the ARAT timer interrupt is enabled
+ * again. In case of GuC reload, it is enabled during SLPC enable.
+ */
+ if (enable_communication && intel_uc_uses_guc_slpc(uc))
+ intel_guc_pm_intrmsk_enable(gt);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_uc_resume(struct intel_uc *uc)
+{
+ /*
+ * When coming out of S3/S4 we sanitize and re-init the HW, so
+ * communication is already re-enabled at this point.
+ */
+ return __uc_resume(uc, false);
+}
+
+int intel_uc_runtime_resume(struct intel_uc *uc)
+{
+ /*
+ * During runtime resume we don't sanitize, so we need to re-init
+ * communication as well.
+ */
+ return __uc_resume(uc, true);
+}
+
+static const struct intel_uc_ops uc_ops_off = {
+ .init_hw = __uc_check_hw,
+};
+
+static const struct intel_uc_ops uc_ops_on = {
+ .sanitize = __uc_sanitize,
+
+ .init_fw = __uc_fetch_firmwares,
+ .fini_fw = __uc_cleanup_firmwares,
+
+ .init = __uc_init,
+ .fini = __uc_fini,
+
+ .init_hw = __uc_init_hw,
+ .fini_hw = __uc_fini_hw,
+};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
new file mode 100644
index 000000000..a8f38c2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_
+
+#include "intel_guc.h"
+#include "intel_guc_rc.h"
+#include "intel_guc_submission.h"
+#include "intel_guc_slpc.h"
+#include "intel_huc.h"
+#include "i915_params.h"
+
+struct intel_uc;
+
+struct intel_uc_ops {
+ int (*sanitize)(struct intel_uc *uc);
+ void (*init_fw)(struct intel_uc *uc);
+ void (*fini_fw)(struct intel_uc *uc);
+ int (*init)(struct intel_uc *uc);
+ void (*fini)(struct intel_uc *uc);
+ int (*init_hw)(struct intel_uc *uc);
+ void (*fini_hw)(struct intel_uc *uc);
+};
+
+struct intel_uc {
+ struct intel_uc_ops const *ops;
+ struct intel_guc guc;
+ struct intel_huc huc;
+
+ /* Snapshot of GuC log from last failed load */
+ struct drm_i915_gem_object *load_err_log;
+
+ bool reset_in_progress;
+};
+
+void intel_uc_init_early(struct intel_uc *uc);
+void intel_uc_init_late(struct intel_uc *uc);
+void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_driver_remove(struct intel_uc *uc);
+void intel_uc_init_mmio(struct intel_uc *uc);
+void intel_uc_reset_prepare(struct intel_uc *uc);
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
+void intel_uc_reset_finish(struct intel_uc *uc);
+void intel_uc_cancel_requests(struct intel_uc *uc);
+void intel_uc_suspend(struct intel_uc *uc);
+void intel_uc_runtime_suspend(struct intel_uc *uc);
+int intel_uc_resume(struct intel_uc *uc);
+int intel_uc_runtime_resume(struct intel_uc *uc);
+
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
+
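+/*
+ * Expands to helpers such as intel_uc_supports_guc(), intel_uc_wants_huc()
+ * and intel_uc_uses_guc_submission().
+ */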
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+ return intel_##func##_is_##required(&uc->x); \
+}
+
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
+
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
+uc_state_checkers(guc, guc_slpc);
+uc_state_checkers(guc, guc_rc);
+
+#undef uc_state_checkers
+#undef __uc_state_checker
+
+static inline int intel_uc_wait_for_idle(struct intel_uc *uc, long timeout)
+{
+ return intel_guc_wait_for_idle(&uc->guc, timeout);
+}
+
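+/*
+ * Generate wrappers that dispatch through uc->ops, returning the given
+ * default when the op is not implemented.
+ */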
+#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
+static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
+{ \
+ if (uc->ops->_OPS) \
+ return uc->ops->_OPS(uc); \
+ return _RET; \
+}
+intel_uc_ops_function(sanitize, sanitize, int, 0);
+intel_uc_ops_function(fetch_firmwares, init_fw, void, );
+intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
+intel_uc_ops_function(init, init, int, 0);
+intel_uc_ops_function(fini, fini, void, );
+intel_uc_ops_function(init_hw, init_hw, int, 0);
+intel_uc_ops_function(fini_hw, fini_hw, void, );
+#undef intel_uc_ops_function
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
new file mode 100644
index 000000000..284d6fbc2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "intel_guc_debugfs.h"
+#include "intel_huc_debugfs.h"
+#include "intel_uc.h"
+#include "intel_uc_debugfs.h"
+
+static int uc_usage_show(struct seq_file *m, void *data)
+{
+ struct intel_uc *uc = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
+ str_yes_no(intel_uc_supports_guc(uc)),
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_uses_guc(uc)));
+ drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
+ str_yes_no(intel_uc_supports_huc(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_uses_huc(uc)));
+ drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
+ str_yes_no(intel_uc_supports_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_uses_guc_submission(uc)));
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "usage", &uc_usage_fops, NULL },
+ };
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+	/* GuC and HuC always go in a pair; no need to check both */
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ root = debugfs_create_dir("uc", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
+
+ intel_guc_debugfs_register(&uc->guc, root);
+ intel_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
new file mode 100644
index 000000000..010ce250d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_UC_H
+#define DEBUGFS_UC_H
+
+struct intel_uc;
+struct dentry;
+
+void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root);
+
+#endif /* DEBUGFS_UC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
new file mode 100644
index 000000000..b91ad4aed
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_lmem.h"
+#include "intel_uc_fw.h"
+#include "intel_uc_fw_abi.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+static inline struct intel_gt *
+____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+{
+ if (type == INTEL_UC_FW_TYPE_GUC)
+ return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+
+ GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+}
+
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return ____uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+ drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
+}
+#endif
+
+/*
+ * List of required GuC and HuC binaries per-platform.
+ * Must be ordered based on platform + revid, from newer to older.
+ *
+ * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
+ * firmware as TGL.
+ *
+ * Version numbers:
+ * Originally, the driver required an exact major/minor/patch match for the
+ * firmware file and only supported that one version for any given platform.
+ * However,
+ * the new direction from upstream is to be backwards compatible with all
+ * prior releases and to be as flexible as possible as to what firmware is
+ * loaded.
+ *
+ * For GuC, the major version number signifies a backwards breaking API change.
+ * So, new format GuC firmware files are labelled by their major version only.
+ * For HuC, there is no KMD interaction, hence no version matching requirement.
+ * So, new format HuC firmware files have no version number at all.
+ *
+ * All of which means that the table below must keep all old format files with
+ * a full three-point version number, but newer files have reduced requirements.
+ * Having said that, the driver still needs to track the minor version number
+ * for GuC at least, as it is useful to report to the user that they are not
+ * running with a recent enough version for all KMD supported features,
+ * security fixes, etc. to be enabled.
+ */
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
+ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+ fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_raw(dg1)) \
+ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
+
+/*
+ * Set of macros for producing a list of filenames from the above table.
+ */
+#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
+ "i915/" \
+ __stringify(prefix_) name_ ".bin"
+
+#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) ".bin"
+
+#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) "." \
+ __stringify(minor_) "." \
+ __stringify(patch_) ".bin"
+
+/* Minor for internal driver use, not part of file name */
+#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
+
+#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
+
+#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
+
+#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+
+/*
+ * All blobs need to be declared via MODULE_FIRMWARE().
+ * This first expansion of the table macros is solely to provide
+ * that declaration.
+ */
+#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
+ MODULE_FIRMWARE(uc_);
+
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP)
+
+/*
+ * The next expansion of the table macros (in __uc_fw_auto_select below) provides
+ * actual data structures with both the filename and the version information.
+ * These structure arrays are then iterated over to find the list of suitable
+ * files for the current platform, and then to attempt to load those files, in
+ * the order listed, until one is successfully found.
+ */
+struct __packed uc_fw_blob {
+ const char *path;
+ bool legacy;
+ u8 major;
+ u8 minor;
+ u8 patch;
+};
+
+#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .major = major_, \
+ .minor = minor_, \
+ .patch = patch_, \
+ .path = path_,
+
+#define UC_FW_BLOB_NEW(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = false }
+
+#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = true }
+
+#define GUC_FW_BLOB(prefix_, major_, minor_) \
+ UC_FW_BLOB_NEW(major_, minor_, 0, \
+ MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
+
+#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_) \
+ UC_FW_BLOB_NEW(0, 0, 0, MAKE_HUC_FW_PATH_BLANK(prefix_))
+
+#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+
+struct __packed uc_fw_platform_requirement {
+ enum intel_platform p;
+ u8 rev; /* first platform rev using this FW */
+ const struct uc_fw_blob blob;
+};
+
+#define MAKE_FW_LIST(platform_, revid_, uc_) \
+{ \
+ .p = INTEL_##platform_, \
+ .rev = revid_, \
+ .blob = uc_, \
+},
+
+struct fw_blobs_by_type {
+ const struct uc_fw_platform_requirement *blobs;
+ u32 count;
+};
+
+static void
+__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
+{
+ static const struct uc_fw_platform_requirement blobs_guc[] = {
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
+ };
+ static const struct uc_fw_platform_requirement blobs_huc[] = {
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP)
+ };
+ static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
+ [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
+ [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
+ };
+ static bool verified;
+ const struct uc_fw_platform_requirement *fw_blobs;
+ enum intel_platform p = INTEL_INFO(i915)->platform;
+ u32 fw_count;
+ u8 rev = INTEL_REVID(i915);
+ int i;
+ bool found;
+
+ /*
+ * The only difference between the ADL GuC FWs is the HWConfig support.
+ * ADL-N does not support HWConfig, so we should use the same binary as
+ * ADL-S; otherwise the GuC might attempt to fetch a config table that
+ * does not exist.
+ */
+ if (IS_ADLP_N(i915))
+ p = INTEL_ALDERLAKE_S;
+
+ GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
+ fw_blobs = blobs_all[uc_fw->type].blobs;
+ fw_count = blobs_all[uc_fw->type].count;
+
+ found = false;
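+	/*
+	 * The table is ordered from newer to older; if a previous attempt
+	 * already selected a file (file_selected.path set), skip past that
+	 * entry so the next suitable blob is picked instead.
+	 */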
+ for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
+
+ if (p != fw_blobs[i].p)
+ continue;
+
+ if (rev < fw_blobs[i].rev)
+ continue;
+
+ if (uc_fw->file_selected.path) {
+ if (uc_fw->file_selected.path == blob->path)
+ uc_fw->file_selected.path = NULL;
+
+ continue;
+ }
+
+ uc_fw->file_selected.path = blob->path;
+ uc_fw->file_wanted.path = blob->path;
+ uc_fw->file_wanted.major_ver = blob->major;
+ uc_fw->file_wanted.minor_ver = blob->minor;
+ found = true;
+ break;
+ }
+
+ if (!found && uc_fw->file_selected.path) {
+ /* Failed to find a match for the last attempt?! */
+ uc_fw->file_selected.path = NULL;
+ }
+
+ /* make sure the list is ordered as expected */
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
+ verified = true;
+
+ for (i = 1; i < fw_count; i++) {
+ /* Next platform is good: */
+ if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ continue;
+
+ /* Next platform revision is good: */
+ if (fw_blobs[i].p == fw_blobs[i - 1].p &&
+ fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ continue;
+
+ /* Platform/revision must be in order: */
+ if (fw_blobs[i].p != fw_blobs[i - 1].p ||
+ fw_blobs[i].rev != fw_blobs[i - 1].rev)
+ goto bad;
+
+ /* Next major version is good: */
+ if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
+ continue;
+
+ /* New must be before legacy: */
+ if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
+ goto bad;
+
+ /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
+ if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
+ if (!fw_blobs[i - 1].blob.major)
+ continue;
+
+ if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
+ continue;
+ }
+
+ /* Major versions must be in order: */
+ if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
+ goto bad;
+
+ /* Next minor version is good: */
+ if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ continue;
+
+ /* Minor versions must be in order: */
+ if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
+ goto bad;
+
+ /* Patch versions must be in order: */
+ if (fw_blobs[i].blob.patch <= fw_blobs[i - 1].blob.patch)
+ continue;
+
+bad:
+ drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
+ fw_blobs[i - 1].blob.legacy ? "L" : "v",
+ fw_blobs[i - 1].blob.major,
+ fw_blobs[i - 1].blob.minor,
+ fw_blobs[i - 1].blob.patch,
+ intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major,
+ fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch);
+
+ uc_fw->file_selected.path = NULL;
+ }
+ }
+}
+
+static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
+{
+ if (i915->params.enable_guc & ENABLE_GUC_MASK)
+ return i915->params.guc_firmware_path;
+ return "";
+}
+
+static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
+{
+ if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915->params.huc_firmware_path;
+ return "";
+}
+
+static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
+{
+ const char *path = NULL;
+
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ path = __override_guc_firmware_path(i915);
+ break;
+ case INTEL_UC_FW_TYPE_HUC:
+ path = __override_huc_firmware_path(i915);
+ break;
+ }
+
+ if (unlikely(path)) {
+ uc_fw->file_selected.path = path;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * intel_uc_fw_init_early - initialize the uC object and select the firmware
+ * @uc_fw: uC firmware
+ * @type: type of uC
+ *
+ * Initialize the state of our uC object and relevant tracking and select the
+ * firmware to fetch and load.
+ */
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type)
+{
+ struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+
+ /*
+ * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
+	 * before we've looked at the HW caps to see if we have uc support
+ */
+ BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
+ GEM_BUG_ON(uc_fw->status);
+ GEM_BUG_ON(uc_fw->file_selected.path);
+
+ uc_fw->type = type;
+
+ if (HAS_GT_UC(i915)) {
+ __uc_fw_auto_select(i915, uc_fw);
+ __uc_fw_user_override(i915, uc_fw);
+ }
+
+ intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
+ INTEL_UC_FIRMWARE_SELECTED :
+ INTEL_UC_FIRMWARE_DISABLED :
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+}
+
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ bool user = e == -EINVAL;
+
+ if (i915_inject_probe_error(i915, e)) {
+ /* non-existing blob */
+ uc_fw->file_selected.path = "<invalid>";
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_probe_error(i915, e)) {
+ /* require next major version */
+ uc_fw->file_wanted.major_ver += 1;
+ uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_probe_error(i915, e)) {
+ /* require next minor version */
+ uc_fw->file_wanted.minor_ver += 1;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->file_wanted.major_ver &&
+ i915_inject_probe_error(i915, e)) {
+ /* require prev major version */
+ uc_fw->file_wanted.major_ver -= 1;
+ uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->file_wanted.minor_ver &&
+ i915_inject_probe_error(i915, e)) {
+ /* require prev minor version - hey, this should work! */
+ uc_fw->file_wanted.minor_ver -= 1;
+ uc_fw->user_overridden = user;
+ } else if (user && i915_inject_probe_error(i915, e)) {
+ /* officially unsupported platform */
+ uc_fw->file_wanted.major_ver = 0;
+ uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->user_overridden = true;
+ }
+}
+
+static int check_gsc_manifest(const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ u32 *dw = (u32 *)fw->data;
+ u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
+
+ uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
+
+ return 0;
+}
+
+static int check_ccs_header(struct drm_i915_private *i915,
+ const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ struct uc_css_header *css;
+ size_t size;
+
+ /* Check the size of the blob before examining buffer contents */
+ if (unlikely(fw->size < sizeof(struct uc_css_header))) {
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
+ return -ENODATA;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Check integrity of size values inside CSS header */
+ size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
+ css->exponent_size_dw) * sizeof(u32);
+ if (unlikely(size != sizeof(struct uc_css_header))) {
+ drm_warn(&i915->drm,
+ "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, sizeof(struct uc_css_header));
+ return -EPROTO;
+ }
+
+ /* uCode size must be calculated from the other size fields */
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At minimum the blob must contain header, uCode and RSA; check their combined size */
+ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (unlikely(fw->size < size)) {
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, size);
+ return -ENOEXEC;
+ }
+
+ /* Sanity check that the fw is not larger than the whole WOPCM memory */
+ size = __intel_uc_fw_get_upload_size(uc_fw);
+ if (unlikely(size >= i915->wopcm.size)) {
+ drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, (size_t)i915->wopcm.size);
+ return -E2BIG;
+ }
+
+ /* Get version numbers from the CSS header */
+ uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
+ uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
+ css->sw_version);
+
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ uc_fw->private_data_size = css->private_data_size;
+
+ return 0;
+}
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct intel_uc_fw_file file_ideal;
+ struct device *dev = i915->drm.dev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ bool old_ver = false;
+ int err;
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
+
+ err = i915_inject_probe_error(i915, -ENXIO);
+ if (err)
+ goto fail;
+
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
+
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
+
+ /* Any error is terminal if overriding. Don't bother searching for older versions */
+ if (err && intel_uc_fw_is_overridden(uc_fw))
+ goto fail;
+
+ while (err == -ENOENT) {
+ old_ver = true;
+
+ __uc_fw_auto_select(i915, uc_fw);
+ if (!uc_fw->file_selected.path) {
+ /*
+ * No more options! But set the path back to something
+ * valid just in case it gets dereferenced.
+ */
+ uc_fw->file_selected.path = file_ideal.path;
+
+ /* Also, preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+ break;
+ }
+
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ }
+
+ if (err)
+ goto fail;
+
+ if (uc_fw->loaded_via_gsc)
+ err = check_gsc_manifest(fw, uc_fw);
+ else
+ err = check_ccs_header(i915, fw, uc_fw);
+ if (err)
+ goto fail;
+
+ if (uc_fw->file_wanted.major_ver) {
+ /* Check the file's major version was as it claimed */
+ if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ } else {
+ if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ old_ver = true;
+ }
+ }
+
+ if (old_ver) {
+ /* Preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+
+ drm_notice(&i915->drm,
+ "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+ }
+
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
+ if (!IS_ERR(obj))
+ obj->flags |= I915_BO_ALLOC_PM_EARLY;
+ } else {
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ }
+
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+
+ release_firmware(fw);
+ return 0;
+
+fail:
+ intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
+ INTEL_UC_FIRMWARE_MISSING :
+ INTEL_UC_FIRMWARE_ERROR);
+
+ i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
+ drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ return err;
+}
+
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
+{
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
+static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ struct i915_vma_resource *dummy = &uc_fw->dummy;
+ u32 pte_flags = 0;
+
+ dummy->start = uc_fw_ggtt_offset(uc_fw);
+ dummy->node_size = obj->base.size;
+ dummy->bi.pages = obj->mm.pages;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(dummy->bi.pages);
+
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ if (ggtt->vm.raw_insert_entries)
+ ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ else
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+}
+
+static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct intel_uncore *uncore = gt->uncore;
+ u64 offset;
+ int ret;
+
+ ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
+ if (ret)
+ return ret;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = uc_fw_ggtt_offset(uc_fw);
+ GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
+
+ /* Set the DMA destination */
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /*
+ * Set the transfer size. The header plus uCode will be copied to WOPCM
+ * via DMA, excluding any other components
+ */
+ intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
+ sizeof(struct uc_css_header) + uc_fw->ucode_size);
+
+ /* Start the DMA */
+ intel_uncore_write_fw(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ if (ret)
+ drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
+
+ /* Disable the bits once DMA is over */
+ intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ * @uc_fw: uC firmware
+ * @dst_offset: destination offset
+ * @dma_flags: flags for DMA ctrl
+ *
+ * Loads uC firmware and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ int err;
+
+ /* make sure the status was cleared the last time we reset the uc */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ err = i915_inject_probe_error(gt->i915, -ENOEXEC);
+ if (err)
+ return err;
+
+ if (!intel_uc_fw_is_loadable(uc_fw))
+ return -ENOEXEC;
+
+ /* Call custom loader */
+ uc_fw_bind_ggtt(uc_fw);
+ err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
+ uc_fw_unbind_ggtt(uc_fw);
+ if (err)
+ goto fail;
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+ return 0;
+
+fail:
+ i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+ return err;
+}
+
+static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
+{
+ /*
+ * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
+ * while it reads it from the 64 RSA registers if it is smaller.
+ * The HuC RSA is always read from memory.
+ */
+ return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
+}
+
+static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct i915_vma *vma;
+ size_t copied;
+ void *vaddr;
+ int err;
+
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ if (!uc_fw_need_rsa_in_memory(uc_fw))
+ return 0;
+
+ /*
+ * uC firmwares will sit above GUC_GGTT_TOP and will not map through
+ * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
+ * authentication from memory, as the RSA offset now falls within the
+ * GuC inaccessible range. We resort to perma-pinning an additional vma
+ * within the accessible range that only contains the RSA signature.
+ * The GuC HW can use this extra pinning to perform the authentication
+ * since its GGTT offset will be GuC accessible.
+ */
+ GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
+ vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915, vma->obj, true));
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
+ }
+
+ copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
+ i915_gem_object_unpin_map(vma->obj);
+
+ if (copied < uc_fw->rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
+ uc_fw->rsa_data = vma;
+
+ return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
+}
+
+static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
+{
+ i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
+}
+
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ /* this should happen before the load! */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto out;
+ }
+
+ err = uc_fw_rsa_data_create(uc_fw);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto out_unpin;
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(uc_fw->obj);
+out:
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ uc_fw_rsa_data_destroy(uc_fw);
+
+ if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+ i915_gem_object_unpin_pages(uc_fw->obj);
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+}
+
+/**
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
+
+ i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
+}
+
+/**
+ * intel_uc_fw_copy_rsa - copy fw RSA to buffer
+ *
+ * @uc_fw: uC firmware
+ * @dst: dst buffer
+ * @max_len: max number of bytes to copy
+ *
+ * Return: number of copied bytes.
+ */
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
+{
+ struct intel_memory_region *mr = uc_fw->obj->mm.region;
+ u32 size = min_t(u32, uc_fw->rsa_size, max_len);
+ u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+ struct sgt_iter iter;
+ size_t count = 0;
+ int idx;
+
+ /* Called during reset handling, must be atomic [no fs_reclaim] */
+ GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
+
+ idx = offset >> PAGE_SHIFT;
+ offset = offset_in_page(offset);
+ if (i915_gem_object_has_struct_page(uc_fw->obj)) {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = kmap_atomic(page);
+ memcpy(dst, vaddr + offset, len);
+ kunmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ } else {
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void __iomem *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = io_mapping_map_atomic_wc(&mr->iomap,
+ addr - mr->region.start);
+ memcpy_fromio(dst, vaddr + offset, len);
+ io_mapping_unmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ u32 ver_sel, ver_want;
+
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
+ if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
+ drm_printf(p, "%s firmware wanted: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
+ drm_printf(p, "\tstatus: %s\n",
+ intel_uc_fw_status_repr(uc_fw->status));
+ ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver);
+ if (ver_sel < ver_want)
+ drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
+ uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver,
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ else
+ drm_printf(p, "\tversion: found %u.%u.%u\n",
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
new file mode 100644
index 000000000..cb586f7df
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+#include <linux/types.h>
+#include "intel_uc_fw_abi.h"
+#include "intel_device_info.h"
+#include "i915_gem.h"
+#include "i915_vma.h"
+
+struct drm_printer;
+struct drm_i915_private;
+struct intel_gt;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
+
+/*
+ * +------------+---------------------------------------------------+
+ * | PHASE | FIRMWARE STATUS TRANSITIONS |
+ * +============+===================================================+
+ * | | UNINITIALIZED |
+ * +------------+- / | \ -+
+ * | | DISABLED <--/ | \--> NOT_SUPPORTED |
+ * | init_early | V |
+ * | | SELECTED |
+ * +------------+- / | \ -+
+ * | | MISSING <--/ | \--> ERROR |
+ * | fetch | V |
+ * | | AVAILABLE |
+ * +------------+- | \ -+
+ * | | | \--> INIT FAIL |
+ * | init | V |
+ * | | /------> LOADABLE <----<-----------\ |
+ * +------------+- \ / \ \ \ -+
+ * | | LOAD FAIL <--< \--> TRANSFERRED \ |
+ * | upload | \ / \ / |
+ * | | \---------/ \--> RUNNING |
+ * +------------+---------------------------------------------------+
+ */
+
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
+ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ INTEL_UC_FIRMWARE_DISABLED, /* disabled */
+ INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
+ INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
+ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
+ INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
+ INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
+ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC = 0,
+ INTEL_UC_FW_TYPE_HUC
+};
+#define INTEL_UC_FW_NUM_TYPES 2
+
+/*
+ * The firmware build process generates a version header file with the major
+ * and minor versions defined. These versions are built into the CSS header of
+ * the firmware. The i915 driver sets the minimal firmware version required per
+ * platform.
+ */
+struct intel_uc_fw_file {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ u16 patch_ver;
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+ enum intel_uc_fw_type type;
+ union {
+ const enum intel_uc_fw_status status;
+ enum intel_uc_fw_status __status; /* no accidental overwrites */
+ };
+ struct intel_uc_fw_file file_wanted;
+ struct intel_uc_fw_file file_selected;
+ bool user_overridden;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+
+ /**
+ * @dummy: A vma used in binding the uc fw to ggtt. We can't define this
+ * vma on the stack as it can lead to a stack overflow, so we define it
+ * here. Safe to have 1 copy per uc fw because the binding is single
+ * threaded as it done during driver load (inherently single threaded)
+ * or during a GT reset (mutex guarantees single threaded).
+ */
+ struct i915_vma_resource dummy;
+ struct i915_vma *rsa_data;
+
+ u32 rsa_size;
+ u32 ucode_size;
+ u32 private_data_size;
+
+ bool loaded_via_gsc;
+};
+
+#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
+#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
+ (uc)->fw.file_selected.minor_ver, \
+ (uc)->fw.file_selected.patch_ver))
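+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the i915
+ * interface): MAKE_UC_VER() packs patch, minor and major into increasingly
+ * significant bytes of a single u32, so version ordering reduces to a plain
+ * integer comparison, which is what intel_uc_fw_dump() relies on.
+ */
+static inline bool uc_fw_file_ver_at_least(const struct intel_uc_fw_file *file,
+ u16 major, u16 minor, u16 patch)
+{
+ return MAKE_UC_VER(file->major_ver, file->minor_ver, file->patch_ver) >=
+ MAKE_UC_VER(major, minor, patch);
+}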
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status);
+#else
+static inline void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+}
+#endif
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return "N/A";
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return "UNINITIALIZED";
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return "DISABLED";
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return "SELECTED";
+ case INTEL_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case INTEL_UC_FIRMWARE_ERROR:
+ return "ERROR";
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ return "INIT FAIL";
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
+ return "LOAD FAIL";
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ return "TRANSFERRED";
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return "RUNNING";
+ }
+ return "<invalid>";
+}
+
+static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return -EACCES;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EPERM;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOENT;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
+ return -EIO;
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return -ESTALE;
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
+static inline enum intel_uc_fw_status
+__intel_uc_fw_status(struct intel_uc_fw *uc_fw)
+{
+ /* shouldn't call this before checking hw/blob availability */
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return uc_fw->status;
+}
+
+static inline bool intel_uc_fw_is_supported(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) > INTEL_UC_FIRMWARE_DISABLED;
+}
+
+static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
+}
+
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
+}
+
+static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
+}
+
+static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->user_overridden;
+}
+
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+ if (intel_uc_fw_is_loaded(uc_fw))
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
+}
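+
+/*
+ * Illustrative sketch only (hypothetical helper, not upstream API): because
+ * the status values are ordered to follow the phase diagram above, "how far
+ * did this firmware get" checks reduce to the comparison helpers, e.g. the
+ * precondition that intel_uc_fw_upload() effectively enforces:
+ */
+static inline bool intel_uc_fw_ready_for_upload(struct intel_uc_fw *uc_fw)
+{
+ return intel_uc_fw_is_loadable(uc_fw) && !intel_uc_fw_is_loaded(uc_fw);
+}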
+
+static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
+}
+
+/**
+ * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return 0;
+
+ return __intel_uc_fw_get_upload_size(uc_fw);
+}
+
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
new file mode 100644
index 000000000..7a411178b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_ABI_H
+#define _INTEL_UC_FW_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/**
+ * DOC: Firmware Layout
+ *
+ * The GuC/HuC firmware layout looks like this::
+ *
+ * +======================================================================+
+ * | Firmware blob |
+ * +===============+===============+============+============+============+
+ * | CSS header | uCode | RSA key | modulus | exponent |
+ * +===============+===============+============+============+============+
+ * <-header size-> <---header size continued ----------->
+ * <--- size ----------------------------------------------------------->
+ * <-key size->
+ * <-mod size->
+ * <-exp size->
+ *
+ * The firmware may or may not have modulus key and exponent data. The header,
+ * uCode and RSA signature are mandatory components that will be used by the
+ * driver. The length of each component, in dwords, can be found in the header.
+ * If the modulus and exponent are not present in the fw (a.k.a. a truncated
+ * image), their length values still appear in the header.
+ *
+ * The driver does some basic fw size validation based on the following rules:
+ *
+ * 1. Header, uCode and RSA are mandatory components.
+ * 2. All firmware components, if present, appear in the sequence illustrated
+ * in the layout table above.
+ * 3. Length info for each component can be found in the header, in dwords.
+ * 4. The modulus and exponent keys are not required by the driver and may be
+ * absent from the fw; in that case the driver loads a truncated firmware
+ * image.
+ *
+ * Starting from DG2, the HuC is loaded by the GSC instead of i915. The GSC
+ * firmware performs all the required integrity checks, we just need to check
+ * the version. Note that the header for GSC-managed blobs is different from the
+ * CSS used for dma-loaded firmwares.
+ */
+
+struct uc_css_header {
+ u32 module_type;
+ /*
+ * header_size includes all non-uCode bits, including css_header, rsa
+ * key, modulus key and exponent data.
+ */
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_DATE_MIN (0xFF << 8)
+#define CSS_DATE_SEC (0xFFFF << 16)
+ char username[8];
+ char buildnumber[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ u32 reserved0[13];
+ union {
+ u32 private_data_size; /* only applies to GuC */
+ u32 reserved1;
+ };
+ u32 header_info;
+} __packed;
+static_assert(sizeof(struct uc_css_header) == 128);
+
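+/*
+ * Illustrative sketch only (hypothetical helper, not part of the ABI): the
+ * minimum blob size implied by a CSS header, following the size rules from
+ * the "Firmware Layout" section above - header, uCode and RSA key must all
+ * fit inside the file.
+ */
+static inline size_t uc_css_min_blob_size(const struct uc_css_header *css)
+{
+ /* uCode length: everything counted by size_dw that is not header */
+ size_t ucode = (size_t)(css->size_dw - css->header_size_dw) * sizeof(u32);
+ /* RSA signature length comes straight from key_size_dw */
+ size_t rsa = (size_t)css->key_size_dw * sizeof(u32);
+
+ return sizeof(struct uc_css_header) + ucode + rsa;
+}
+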
+#define HUC_GSC_VERSION_HI_DW 44
+#define HUC_GSC_MAJOR_VER_HI_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_HI_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_LO_DW 45
+#define HUC_GSC_PATCH_VER_LO_MASK (0xFF << 0)
+
+#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
new file mode 100644
index 000000000..e28518fe8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+
+ return err;
+}
+
+static struct i915_request *nop_user_request(struct intel_context *ce,
+ struct i915_request *from)
+{
+ struct i915_request *rq;
+ int ret;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (from) {
+ ret = i915_sw_fence_await_dma_fence(&rq->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (ret < 0) {
+ i915_request_put(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_guc_scrub_ctbs(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ int i;
+ struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+
+ /* Submit requests and inject errors forcing G2H to be dropped */
+ for (i = 0; i < 3; ++i) {
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ switch (i) {
+ case 0:
+ ce->drop_schedule_enable = true;
+ break;
+ case 1:
+ ce->drop_schedule_disable = true;
+ break;
+ case 2:
+ ce->drop_deregister = true;
+ break;
+ }
+
+ rq = nop_user_request(ce, NULL);
+ intel_context_put(ce);
+
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ last[i] = rq;
+ }
+
+ for (i = 0; i < 3; ++i) {
+ ret = i915_request_wait(last[i], 0, HZ);
+ if (ret < 0) {
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ goto err;
+ }
+ i915_request_put(last[i]);
+ last[i] = NULL;
+ }
+
+ /* Force all H2G / G2H to be submitted / processed */
+ intel_gt_retire_requests(gt);
+ msleep(500);
+
+ /* Scrub missing G2H */
+ intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
+
+ /* GT will not idle if G2H are lost */
+ ret = intel_gt_wait_for_idle(gt, HZ);
+ if (ret < 0) {
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ goto err;
+ }
+
+err:
+ for (i = 0; i < 3; ++i)
+ if (last[i])
+ i915_request_put(last[i]);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return ret;
+}
+
+/*
+ * intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
+ *
+ * This test creates a spinner which is used to block all subsequent submissions
+ * until it completes. Next, a loop creates a context and a NOP request each
+ * iteration until the guc_ids are exhausted (request creation returns -EAGAIN).
+ * The spinner is ended, unblocking all requests created in the loop. At this
+ * point all guc_ids are exhausted but are available to steal. Try to create
+ * another request which should successfully steal a guc_id. Wait on last
+ * request to complete, idle GPU, verify a guc_id was stolen via a counter, and
+ * exit the test. The test also artificially reduces the number of guc_ids so the
+ * test runs in a timely manner.
+ */
+static int intel_guc_steal_guc_ids(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret, sv, context_index = 0;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context **ce;
+ struct igt_spinner spin;
+ struct i915_request *spin_rq = NULL, *rq, *last = NULL;
+ int number_guc_id_stolen = guc->number_guc_id_stolen;
+
+ ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
+ if (!ce) {
+ drm_err(&gt->i915->drm, "Context array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+ sv = guc->submission_state.num_guc_ids;
+ guc->submission_state.num_guc_ids = 512;
+
+ /* Create spinner to block requests in below loop */
+ ce[context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index]);
+ ce[context_index] = NULL;
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ goto err_wakeref;
+ }
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ goto err_contexts;
+ }
+ spin_rq = igt_spinner_create_request(&spin, ce[context_index],
+ MI_ARB_CHECK);
+ if (IS_ERR(spin_rq)) {
+ ret = PTR_ERR(spin_rq);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err_contexts;
+ }
+ ret = request_add_spin(spin_rq, &spin);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Use all guc_ids */
+ while (ret != -EAGAIN) {
+ ce[++context_index] = intel_context_create(engine);
+ if (IS_ERR(ce[context_index])) {
+ ret = PTR_ERR(ce[context_index--]);
+ ce[context_index] = NULL;
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ rq = nop_user_request(ce[context_index], spin_rq);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ rq = NULL;
+ if (ret != -EAGAIN) {
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
+ context_index, ret);
+ goto err_spin_rq;
+ }
+ } else {
+ if (last)
+ i915_request_put(last);
+ last = rq;
+ }
+ }
+
+ /* Release blocked requests */
+ igt_spinner_end(&spin);
+ ret = intel_selftest_wait_for_rq(spin_rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
+ i915_request_put(last);
+ goto err_spin_rq;
+ }
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ spin_rq = NULL;
+
+ /* Wait for last request */
+ ret = i915_request_wait(last, 0, HZ * 30);
+ i915_request_put(last);
+ if (ret < 0) {
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Try to steal guc_id */
+ rq = nop_user_request(ce[context_index], NULL);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for request with stolen guc_id */
+ ret = i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
+ if (ret < 0) {
+ drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Wait for idle */
+ ret = intel_gt_wait_for_idle(gt, HZ * 30);
+ if (ret < 0) {
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ goto err_spin_rq;
+ }
+
+ /* Verify a guc_id was stolen */
+ if (guc->number_guc_id_stolen == number_guc_id_stolen) {
+ drm_err(&gt->i915->drm, "No guc_id was stolen");
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+err_spin_rq:
+ if (spin_rq) {
+ igt_spinner_end(&spin);
+ intel_selftest_wait_for_rq(spin_rq);
+ i915_request_put(spin_rq);
+ igt_spinner_fini(&spin);
+ intel_gt_wait_for_idle(gt, HZ * 30);
+ }
+err_contexts:
+ for (; context_index >= 0 && ce[context_index]; --context_index)
+ intel_context_put(ce[context_index]);
+err_wakeref:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kfree(ce);
+ guc->submission_state.num_guc_ids = sv;
+
+ return ret;
+}
+
+int intel_guc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_scrub_ctbs),
+ SUBTEST(intel_guc_steal_guc_ids),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
new file mode 100644
index 000000000..01f8cd3c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+#define BEAT_INTERVAL 100
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_hang_guc(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ unsigned int reset_count;
+ u32 guc_status;
+ u32 old_beat;
+
+ ctx = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx)) {
+ drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ return PTR_ERR(ctx);
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(gt->engine[BCS0]);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err;
+ }
+
+ engine = ce->engine;
+ reset_count = i915_reset_count(global);
+
+ old_beat = engine->props.heartbeat_interval_ms;
+ ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ goto err;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ goto err_spin;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ if (!(guc_status & GS_MIA_IN_RESET)) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ ret = -EIO;
+ goto err_spin;
+ }
+
+ /* Wait for the heartbeat to cause a reset */
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ goto err_spin;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ ret = -EINVAL;
+ goto err_spin;
+ }
+
+err_spin:
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ intel_engine_set_heartbeat(engine, old_beat);
+
+ if (ret == 0) {
+ rq = nop_request(engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ goto err;
+ }
+ }
+
+err:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kernel_context_close(ctx);
+
+ return ret;
+}
+
+int intel_guc_hang_check(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_hang_guc),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
new file mode 100644
index 000000000..d17982c36
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
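+/*
+ * Order the engines array by each engine's logical instance, i.e. the bit
+ * set in its logical_mask.
+ */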
+static void logical_sort(struct intel_engine_cs **engines, int num_engines)
+{
+ struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
+ int i, j;
+
+ for (i = 0; i < num_engines; ++i)
+ for (j = 0; j < MAX_ENGINE_INSTANCE + 1; ++j) {
+ if (engines[j]->logical_mask & BIT(i)) {
+ sorted[i] = engines[j];
+ break;
+ }
+ }
+
+ memcpy(*engines, *sorted,
+ sizeof(struct intel_engine_cs *) * num_engines);
+}
+
+static struct intel_context *
+multi_lrc_create_parent(struct intel_gt *gt, u8 class,
+ unsigned long flags)
+{
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i = 0;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != class)
+ continue;
+
+ siblings[i++] = engine;
+ }
+
+ if (i <= 1)
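+ /*
+ * Need at least two engines in the class for a parallel context;
+ * ERR_PTR(0) == NULL tells the caller to skip this class.
+ */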
+ return ERR_PTR(0);
+
+ logical_sort(siblings, i);
+
+ return intel_engine_create_parallel(siblings, 1, i);
+}
+
+static void multi_lrc_context_unpin(struct intel_context *ce)
+{
+ struct intel_context *child;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+}
+
+static void multi_lrc_context_put(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /*
+ * Only the parent gets the creation ref put in the uAPI, the parent
+ * itself is responsible for creation ref put on the children.
+ */
+ intel_context_put(ce);
+}
+
+static struct i915_request *
+multi_lrc_nop_request(struct intel_context *ce)
+{
+ struct intel_context *child;
+ struct i915_request *rq, *child_rq;
+ int i = 0;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ for_each_child(ce, child) {
+ child_rq = intel_context_create_request(child);
+ if (IS_ERR(child_rq))
+ goto child_error;
+
+ if (++i == ce->parallel.number_children)
+ set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+ &child_rq->fence.flags);
+ i915_request_add(child_rq);
+ }
+
+ return rq;
+
+child_error:
+ i915_request_put(rq);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
+{
+ struct intel_context *parent;
+ struct i915_request *rq;
+ int ret;
+
+ parent = multi_lrc_create_parent(gt, class, 0);
+ if (IS_ERR(parent)) {
+ drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
+ return PTR_ERR(parent);
+ } else if (!parent) {
+ drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
+ return 0;
+ }
+
+ rq = multi_lrc_nop_request(parent);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
+ goto out;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret)
+ drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
+
+ i915_request_put(rq);
+
+ if (ret >= 0) {
+ ret = intel_gt_wait_for_idle(gt, HZ * 5);
+ if (ret < 0)
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ }
+
+out:
+ multi_lrc_context_unpin(parent);
+ multi_lrc_context_put(parent);
+ return ret;
+}
+
+static int intel_guc_multi_lrc_basic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ unsigned int class;
+ int ret;
+
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ /* We don't support breadcrumb handshake on these classes */
+ if (class == COMPUTE_CLASS || class == RENDER_CLASS)
+ continue;
+
+ ret = __intel_guc_multi_lrc_basic(gt, class);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_multi_lrc_basic),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}