author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/i915/selftests
parent    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c              |  352
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c                 |  256
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_evict.c           |  569
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c             | 2328
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_live_selftests.h      |   54
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_mock_selftests.h      |   35
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c                |  439
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf_selftests.h      |   21
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_random.c              |  109
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_random.h              |   65
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c             | 3288
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_selftest.c            |  440
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_sw_fence.c            |  757
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_syncmap.c             |  616
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c                 | 1107
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_atomic.c               |   47
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_atomic.h               |   17
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_flush_test.c           |   35
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_flush_test.h           |   14
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_live_test.c            |   73
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_live_test.h            |   35
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_mmap.c                 |   52
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_mmap.h                 |   21
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.c                |   51
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_reset.h                |   18
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c              |  277
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.h              |   43
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c      | 1411
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c  |   99
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h  |   35
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c             |  350
-rw-r--r-- drivers/gpu/drm/i915/selftests/lib_sw_fence.c             |  136
-rw-r--r-- drivers/gpu/drm/i915/selftests/lib_sw_fence.h             |   43
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.c                  |   34
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.h                  |   17
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_drm.h                 |   45
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c          |  252
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.h          |   12
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.c                 |  136
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gtt.h                 |   37
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.c              |  124
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.h              |   22
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_request.c             |   59
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_request.h             |   37
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_uncore.c              |   49
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_uncore.h              |   34
-rw-r--r-- drivers/gpu/drm/i915/selftests/scatterlist.c              |  380
47 files changed, 14431 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
new file mode 100644
index 000000000..b61fe850e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -0,0 +1,352 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/kref.h>
+#include <linux/string_helpers.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+struct live_active {
+ struct i915_active base;
+ struct kref ref;
+ bool retired;
+};
+
+static void __live_get(struct live_active *active)
+{
+ kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+ i915_active_fini(&active->base);
+ kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+ struct live_active *active = container_of(ref, typeof(*active), ref);
+
+ __live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+ kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ __live_get(active);
+ return 0;
+}
+
+static void __live_retire(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ active->retired = true;
+ __live_put(active);
+}
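+
+/*
+ * The contract under test: i915_active invokes ->active() when the
+ * first request is added to an idle tracker and ->retire() once the
+ * last tracked request completes, so the kref get/put pair above
+ * holds the fixture alive across exactly that busy window.
+ */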
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+ struct live_active *active;
+
+ active = kzalloc(sizeof(*active), GFP_KERNEL);
+ if (!active)
+ return NULL;
+
+ kref_init(&active->ref);
+ i915_active_init(&active->base, __live_active, __live_retire, 0);
+
+ return active;
+}
+
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ struct i915_sw_fence *submit;
+ struct live_active *active;
+ unsigned int count = 0;
+ int err = 0;
+
+ active = __live_alloc(i915);
+ if (!active)
+ return ERR_PTR(-ENOMEM);
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ kfree(active);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+ if (err >= 0)
+ err = i915_active_add_request(&active->base, rq);
+ i915_request_add(rq);
+ if (err) {
+ pr_err("Failed to track active ref!\n");
+ break;
+ }
+
+ count++;
+ }
+
+ i915_active_release(&active->base);
+ if (READ_ONCE(active->retired) && count) {
+ pr_err("i915_active retired before submission!\n");
+ err = -EINVAL;
+ }
+ if (atomic_read(&active->base.count) != count) {
+ pr_err("i915_active not tracking all requests, found %d, expected %d\n",
+ atomic_read(&active->base.count), count);
+ err = -EINVAL;
+ }
+
+out:
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ if (err) {
+ __live_put(active);
+ active = ERR_PTR(err);
+ }
+
+ return active;
+}
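+
+/*
+ * Ordering note: every request above waits on the still-unsignalled
+ * heap fence, so none can execute while ->retired and base.count are
+ * sampled; the requests are only released by i915_sw_fence_commit()
+ * at "out:", after the checks.
+ */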
+
+static int live_active_wait(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_active_setup(i915);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
+ pr_err("i915_active not retired after waiting!\n");
+ i915_active_print(&active->base, &p);
+
+ err = -EINVAL;
+ }
+
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_active_retire(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests are indirectly retired */
+
+ active = __live_active_setup(i915);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ /* waits for & retires all requests */
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
+ pr_err("i915_active not retired after flushing!\n");
+ i915_active_print(&active->base, &p);
+
+ err = -EINVAL;
+ }
+
+ __live_put(active);
+
+ return err;
+}
+
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+	/* Check that we get a callback when the barriers are flushed */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+ if (err)
+ goto out;
+
+ __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
+int i915_active_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_active_wait),
+ SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+ struct intel_engine_cs *engine;
+
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+ drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
+ drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+ drm_printf(m, "\tpreallocated barriers? %s\n",
+ str_yes_no(!llist_empty(&ref->preallocated_barriers)));
+
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct intel_engine_cs *engine;
+
+ engine = node_to_barrier(it);
+ if (engine) {
+ drm_printf(m, "\tbarrier: %s\n", engine->name);
+ continue;
+ }
+
+ if (i915_active_fence_isset(&it->base)) {
+ drm_printf(m,
+ "\ttimeline: %llx\n", it->timeline);
+ continue;
+ }
+ }
+
+ i915_active_release(ref);
+ }
+}
+
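+/*
+ * Lock+unlock acts as a barrier: it cannot complete until any
+ * concurrent critical section holding the same lock (here, the
+ * retirement callback under ref->tree_lock) has finished.
+ */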
+static void spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock_irq(lock);
+ spin_unlock_irq(lock);
+}
+
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ /* Wait for all active callbacks */
+ rcu_read_lock();
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
+ rcu_read_unlock();
+
+ i915_active_release(ref);
+ }
+
+ /* And wait for the retire callback */
+ spin_unlock_wait(&ref->tree_lock);
+
+ /* ... which may have been on a thread instead */
+ flush_work(&ref->work);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
new file mode 100644
index 000000000..e5dd82e7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -0,0 +1,256 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/random.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "mock_drm.h"
+
+static int switch_to_context(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ return err;
+}
+
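+/*
+ * Stolen memory has no direct CPU mapping, so to scribble over it we
+ * temporarily point the GGTT PTE reserved for error capture at each
+ * stolen page in turn and write through the write-combining aperture.
+ */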
+static void trash_stolen(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ const u64 slot = ggtt->error_capture.start;
+ const resource_size_t size = resource_size(&i915->dsm);
+ unsigned long page;
+ u32 prng = 0x12345678;
+
+ /* XXX: fsck. needs some more thought... */
+ if (!i915_ggtt_has_aperture(ggtt))
+ return;
+
+ for (page = 0; page < size; page += PAGE_SIZE) {
+ const dma_addr_t dma = i915->dsm.start + page;
+ u32 __iomem *s;
+ int x;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
+ for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
+ prng = next_pseudo_random32(prng);
+ iowrite32(prng, &s[x]);
+ }
+ io_mapping_unmap_atomic(s);
+ }
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
+static void simulate_hibernate(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /*
+ * As a final sting in the tail, invalidate stolen. Under a real S4,
+ * stolen is lost and needs to be refilled on resume. However, under
+ * CI we merely do S4-device testing (as full S4 is too unreliable
+ * for automated testing across a cluster), so to simulate the effect
+ * of stolen being trashed across S4, we trash it ourselves.
+ */
+ trash_stolen(i915);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+static int igt_pm_prepare(struct drm_i915_private *i915)
+{
+ i915_gem_suspend(i915);
+
+ return 0;
+}
+
+static void igt_pm_suspend(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
+ i915_gem_suspend_late(i915);
+ }
+}
+
+static void igt_pm_hibernate(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ i915_ggtt_suspend(to_gt(i915)->ggtt);
+
+ i915_gem_freeze(i915);
+ i915_gem_freeze_late(i915);
+ }
+}
+
+static void igt_pm_resume(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+ * Both suspend and hibernate follow the same wakeup path and assume
+ * that runtime-pm just works.
+ */
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ i915_ggtt_resume(to_gt(i915)->ggtt);
+ i915_gem_resume(i915);
+ }
+}
+
+static int igt_gem_suspend(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct file *file;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = -ENOMEM;
+ ctx = live_context(i915, file);
+ if (!IS_ERR(ctx))
+ err = switch_to_context(ctx);
+ if (err)
+ goto out;
+
+ err = igt_pm_prepare(i915);
+ if (err)
+ goto out;
+
+ igt_pm_suspend(i915);
+
+ /* Here be dragons! Note that with S3RST any S3 may become S4! */
+ simulate_hibernate(i915);
+
+ igt_pm_resume(i915);
+
+ err = switch_to_context(ctx);
+out:
+ fput(file);
+ return err;
+}
+
+static int igt_gem_hibernate(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct file *file;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = -ENOMEM;
+ ctx = live_context(i915, file);
+ if (!IS_ERR(ctx))
+ err = switch_to_context(ctx);
+ if (err)
+ goto out;
+
+ err = igt_pm_prepare(i915);
+ if (err)
+ goto out;
+
+ igt_pm_hibernate(i915);
+
+ /* Here be dragons! */
+ simulate_hibernate(i915);
+
+ igt_pm_resume(i915);
+
+ err = switch_to_context(ctx);
+out:
+ fput(file);
+ return err;
+}
+
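+/*
+ * A minimal exercise of the ww-mutex dance: locking the same object
+ * twice must be tolerated (-EALREADY is swallowed), while -EDEADLK
+ * must be answered by i915_gem_ww_ctx_backoff() and a full retry of
+ * the locking sequence.
+ */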
+static int igt_gem_ww_ctx(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *obj2;
+ struct i915_gem_ww_ctx ww;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
+ goto put1;
+ }
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ /* Lock the objects, twice for good measure (-EALREADY handling) */
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj2, &ww);
+ if (!err)
+ err = i915_gem_object_lock(obj2, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ i915_gem_object_put(obj2);
+put1:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int i915_gem_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_suspend),
+ SUBTEST(igt_gem_hibernate),
+ SUBTEST(igt_gem_ww_ctx),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
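
Each of the *_live_selftests() entry points above is collected by the common selftest runner. A rough sketch of the registration (illustrative only; the real entries live in i915_live_selftests.h from the diffstat, which uses this selftest(name, function) convention):

	/* i915_live_selftests.h, abridged and illustrative */
	selftest(active, i915_active_live_selftests)
	selftest(gem, i915_gem_live_selftests)
	selftest(evict, i915_gem_evict_live_selftests)

At runtime the suites are typically selected with the i915 live_selftests module parameter (-1 runs every suite in the list).
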
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
new file mode 100644
index 000000000..37068542a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+
+static void quirk_add(struct drm_i915_gem_object *obj,
+ struct list_head *objects)
+{
+ /* quirk is only for live tiled objects, use it to declare ownership */
+ GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ list_add(&obj->st_link, objects);
+}
+
+static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
+ return PTR_ERR(vma);
+ }
+
+ quirk_add(obj, objects);
+ count++;
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, ggtt->vm.total / PAGE_SIZE);
+
+ if (list_empty(&ggtt->vm.bound_list)) {
+ pr_err("No objects on the GGTT inactive list!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void unpin_ggtt(struct i915_ggtt *ggtt)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ if (i915_gem_object_has_tiling_quirk(vma->obj))
+ i915_vma_unpin(vma);
+}
+
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, list, st_link) {
+ GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
+ i915_gem_object_set_tiling_quirk(obj);
+ i915_gem_object_put(obj);
+ }
+
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
+}
+
+static int igt_evict_something(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict one. */
+
+ err = populate_ggtt(ggtt, &objects);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(ggtt);
+
+ /* Everything is unpinned, we should be able to evict something */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_something(&ggtt->vm, NULL,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(ggtt, &objects);
+ return err;
+}
+
+static int igt_overcommit(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Fill the GGTT with pinned objects and then try to pin one more.
+ * We expect it to fail.
+ */
+
+ err = populate_ggtt(ggtt, &objects);
+ if (err)
+ goto cleanup;
+
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+
+ quirk_add(obj, &objects);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (vma != ERR_PTR(-ENOSPC)) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(ggtt, &objects);
+ return err;
+}
+
+static int igt_evict_for_vma(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_mm_node target = {
+ .start = 0,
+ .size = 4096,
+ };
+ LIST_HEAD(objects);
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict a range. */
+
+ err = populate_ggtt(ggtt, &objects);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(ggtt);
+
+ /* Everything is unpinned, we should be able to evict the node */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) {
+ pr_err("i915_gem_evict_for_node returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(ggtt, &objects);
+ return err;
+}
+
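+/*
+ * An empty color_adjust is sufficient: eviction only checks whether a
+ * hook is installed (i915_vm_has_cache_coloring()) and compares node
+ * colours itself, so the callback never needs to adjust the range.
+ */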
+static void mock_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+}
+
+static int igt_evict_for_cache_color(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ const unsigned long flags = PIN_OFFSET_FIXED;
+ struct drm_mm_node target = {
+ .start = I915_GTT_PAGE_SIZE * 2,
+ .size = I915_GTT_PAGE_SIZE,
+ .color = I915_CACHE_LLC,
+ };
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ LIST_HEAD(objects);
+ int err;
+
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so the presence of mm.color_adjust for
+ * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+ * color adjust will work just fine for our purposes.
+ */
+ ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
+
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ I915_GTT_PAGE_SIZE | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[0]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
+
+ /* Neighbouring; same colour - should fit */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ (I915_GTT_PAGE_SIZE * 2) | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[1]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ i915_vma_unpin(vma);
+
+ /* Remove just the second vma */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) {
+ pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
+ goto cleanup;
+ }
+
+ /* Attempt to remove the first *pinned* vma, by removing the (empty)
+ * neighbour -- this should fail.
+ */
+ target.color = I915_CACHE_L3_LLC;
+
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (!err) {
+		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ err = 0;
+
+cleanup:
+ unpin_ggtt(ggtt);
+ cleanup_objects(ggtt, &objects);
+ ggtt->vm.mm.color_adjust = NULL;
+ return err;
+}
+
+static int igt_evict_vm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_gem_ww_ctx ww;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict everything. */
+
+ err = populate_ggtt(ggtt, &objects);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) {
+		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
+		       err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(ggtt);
+
+ for_i915_gem_ww(&ww, err, false) {
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
+ mutex_unlock(&ggtt->vm.mutex);
+ }
+
+ if (err) {
+		pr_err("i915_gem_evict_vm after unpinning returned err=%d\n",
+		       err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(ggtt, &objects);
+ return err;
+}
+
+static int igt_evict_contexts(void *arg)
+{
+ const u64 PRETEND_GGTT_SIZE = 16ull << 20;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct reserved {
+ struct drm_mm_node node;
+ struct reserved *next;
+ } *reserved = NULL;
+ intel_wakeref_t wakeref;
+ struct drm_mm_node hole;
+ unsigned long count;
+ int err;
+
+ /*
+ * The purpose of this test is to verify that we will trigger an
+ * eviction in the GGTT when constructing a request that requires
+ * additional space in the GGTT for pinning the context. This space
+ * is not directly tied to the request so reclaiming it requires
+ * extra work.
+ *
+ * As such this test is only meaningful for full-ppgtt environments
+ * where the GTT space of the request is separate from the GGTT
+ * allocation required to build the request.
+ */
+ if (!HAS_FULL_PPGTT(i915))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /* Reserve a block so that we know we have enough to fit a few rq */
+ memset(&hole, 0, sizeof(hole));
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
+ PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->vm.total,
+ PIN_NOEVICT);
+ if (err)
+ goto out_locked;
+
+ /* Make the GGTT appear small by filling it with unevictable nodes */
+ count = 0;
+ do {
+ struct reserved *r;
+
+ mutex_unlock(&ggtt->vm.mutex);
+ r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ mutex_lock(&ggtt->vm.mutex);
+ if (!r) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+
+ if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
+ 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->vm.total,
+ PIN_NOEVICT)) {
+ kfree(r);
+ break;
+ }
+
+ r->next = reserved;
+ reserved = r;
+
+ count++;
+ } while (1);
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
+ pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
+
+ /* Overfill the GGTT with context objects and so try to evict one. */
+ for_each_engine(engine, gt, id) {
+ struct i915_sw_fence fence;
+ struct i915_request *last = NULL;
+
+ count = 0;
+ onstack_fence_init(&fence);
+ do {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ break;
+
+ /* We will need some GGTT space for the rq's context */
+ igt_evict_ctl.fail_if_busy = true;
+ rq = intel_context_create_request(ce);
+ igt_evict_ctl.fail_if_busy = false;
+ intel_context_put(ce);
+
+ if (IS_ERR(rq)) {
+ /* When full, fail_if_busy will trigger EBUSY */
+ if (PTR_ERR(rq) != -EBUSY) {
+ pr_err("Unexpected error from request alloc (on %s): %d\n",
+ engine->name,
+ (int)PTR_ERR(rq));
+ err = PTR_ERR(rq);
+ }
+ break;
+ }
+
+ /* Keep every request/ctx pinned until we are full */
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &fence,
+ GFP_KERNEL);
+ if (err < 0)
+ break;
+
+ i915_request_add(rq);
+ count++;
+ if (last)
+ i915_request_put(last);
+ last = i915_request_get(rq);
+ err = 0;
+		} while (1);
+ onstack_fence_fini(&fence);
+ pr_info("Submitted %lu contexts/requests on %s\n",
+ count, engine->name);
+ if (err)
+ break;
+ if (last) {
+ if (i915_request_wait(last, 0, HZ) < 0) {
+ err = -EIO;
+ i915_request_put(last);
+				pr_err("Failed waiting for last request (on %s)\n",
+				       engine->name);
+ break;
+ }
+ i915_request_put(last);
+ }
+ err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
+ if (err) {
+			pr_err("Failed to idle GT (on %s)\n", engine->name);
+ break;
+ }
+ }
+
+ mutex_lock(&ggtt->vm.mutex);
+out_locked:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ while (reserved) {
+ struct reserved *next = reserved->next;
+
+ drm_mm_remove_node(&reserved->node);
+ kfree(reserved);
+
+ reserved = next;
+ }
+ if (drm_mm_node_allocated(&hole))
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err;
+}
+
+int i915_gem_evict_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_something),
+ SUBTEST(igt_evict_for_vma),
+ SUBTEST(igt_evict_for_cache_color),
+ SUBTEST(igt_evict_vm),
+ SUBTEST(igt_overcommit),
+ };
+ struct drm_i915_private *i915;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ err = i915_subtests(tests, to_gt(i915));
+
+ mock_destroy_device(i915);
+ return err;
+}
+
+int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_contexts),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
new file mode 100644
index 000000000..e050a2de5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -0,0 +1,2328 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list_sort.h>
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gtt.h"
+
+#include "i915_random.h"
+#include "i915_selftest.h"
+#include "i915_vma_resource.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_gtt.h"
+#include "igt_flush_test.h"
+
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+ i915_gem_drain_freed_objects(i915);
+}
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
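+/*
+ * Fake backing store: every scatterlist entry points at the same bogus
+ * pfn (PFN_BIAS) and nothing is ever dma-mapped for real, so a "huge"
+ * object costs only its page-table entries. This is what lets the hole
+ * tests below sweep multi-GiB GTT ranges without owning that much RAM.
+ */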
+static int fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define PFN_BIAS 0x1000
+ struct sg_table *pages;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ typeof(obj->base.size) rem;
+
+ pages = kmalloc(sizeof(*pages), GFP);
+ if (!pages)
+ return -ENOMEM;
+
+ rem = round_up(obj->base.size, BIT(31)) >> 31;
+ if (sg_alloc_table(pages, rem, GFP)) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ sg_page_sizes = 0;
+ rem = obj->base.size;
+ for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+ unsigned long len = min_t(typeof(rem), rem, BIT(31));
+
+ GEM_BUG_ON(!len);
+ sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
+ sg_dma_address(sg) = page_to_phys(sg_page(sg));
+ sg_dma_len(sg) = len;
+ sg_page_sizes |= len;
+
+ rem -= len;
+ }
+ GEM_BUG_ON(rem);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_pages(obj, pages);
+ obj->mm.dirty = false;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .name = "fake-gem",
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_pages,
+ .put_pages = fake_put_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_dma_object(struct drm_i915_private *i915, u64 size)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ goto err;
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
+
+ i915_gem_object_set_volatile(obj);
+
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ /* Preallocate the "backing storage" */
+ if (i915_gem_object_pin_pages_unlocked(obj))
+ goto err_obj;
+
+ i915_gem_object_unpin_pages(obj);
+ return obj;
+
+err_obj:
+ i915_gem_object_put(obj);
+err:
+ return ERR_PTR(-ENOMEM);
+}
+
+static int igt_ppgtt_alloc(void *arg)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct i915_ppgtt *ppgtt;
+ struct i915_gem_ww_ctx ww;
+ u64 size, last, limit;
+ int err = 0;
+
+	/* Allocate a ppgtt and try to fill the entire range */
+
+ if (!HAS_PPGTT(dev_priv))
+ return 0;
+
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (!ppgtt->vm.allocate_va_range)
+ goto err_ppgtt_cleanup;
+
+ /*
+	 * While we only allocate the page tables here, and so could
+	 * address a much larger GTT than would actually fit into RAM,
+	 * a practical limit is the number of physical pages in the
+	 * system. This should ensure that we do not run into the
+	 * oom-killer during the test and wilfully take down the machine.
+ */
+ limit = totalram_pages() << PAGE_SHIFT;
+ limit = min(ppgtt->vm.total, limit);
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(&ppgtt->vm, &ww);
+ if (err)
+ goto err_ppgtt_cleanup;
+
+ /* Check we can allocate the entire range */
+ for (size = 4096; size <= limit; size <<= 2) {
+ struct i915_vm_pt_stash stash = {};
+
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
+ if (err)
+ goto err_ppgtt_cleanup;
+
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
+ if (err) {
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+ goto err_ppgtt_cleanup;
+ }
+
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
+ cond_resched();
+
+ ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
+
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+ }
+
+ /* Check we can incrementally allocate the entire range */
+ for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
+ struct i915_vm_pt_stash stash = {};
+
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
+ if (err)
+ goto err_ppgtt_cleanup;
+
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
+ if (err) {
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+ goto err_ppgtt_cleanup;
+ }
+
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
+ last, size - last);
+ cond_resched();
+
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
+ }
+
+err_ppgtt_cleanup:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ i915_vm_put(&ppgtt->vm);
+ return err;
+}
+
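+/*
+ * Unlike the hole tests that follow, this one bypasses the VMA layer
+ * entirely: a mocked i915_vma_resource is handed straight to
+ * vm->insert_entries() and vm->clear_range(), so only the raw
+ * page-table plumbing is exercised.
+ */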
+static int lowlevel_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const unsigned int min_alignment =
+ i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+ I915_RND_STATE(seed_prng);
+ struct i915_vma_resource *mock_vma_res;
+ unsigned int size;
+
+ mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
+ if (!mock_vma_res)
+ return -ENOMEM;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ I915_RND_SUBSTATE(prng, seed_prng);
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ u64 hole_size, aligned_size;
+
+ aligned_size = max_t(u32, ilog2(min_alignment), size);
+ hole_size = (hole_end - hole_start) >> aligned_size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
+ do {
+ order = i915_random_order(count, &prng);
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count) {
+ kfree(mock_vma_res);
+ return -ENOMEM;
+ }
+ GEM_BUG_ON(!order);
+
+ GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
+ GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ GEM_BUG_ON(obj->base.size != BIT_ULL(size));
+
+ if (i915_gem_object_pin_pages_unlocked(obj)) {
+ i915_gem_object_put(obj);
+ kfree(order);
+ break;
+ }
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
+ intel_wakeref_t wakeref;
+
+ GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
+
+ if (igt_timeout(end_time,
+ "%s timed out before %d/%d\n",
+ __func__, n, count)) {
+ hole_end = hole_start; /* quit */
+ break;
+ }
+
+ if (vm->allocate_va_range) {
+ struct i915_vm_pt_stash stash = {};
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(vm, &ww);
+ if (err)
+ goto alloc_vm_end;
+
+ err = -ENOMEM;
+ if (i915_vm_alloc_pt_stash(vm, &stash,
+ BIT_ULL(size)))
+ goto alloc_vm_end;
+
+ err = i915_vm_map_pt_stash(vm, &stash);
+ if (!err)
+ vm->allocate_va_range(vm, &stash,
+ addr, BIT_ULL(size));
+ i915_vm_free_pt_stash(vm, &stash);
+alloc_vm_end:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err)
+ break;
+ }
+
+ mock_vma_res->bi.pages = obj->mm.pages;
+ mock_vma_res->node_size = BIT_ULL(aligned_size);
+ mock_vma_res->start = addr;
+
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+ vm->insert_entries(vm, mock_vma_res,
+ I915_CACHE_NONE, 0);
+ }
+ count = n;
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
+ intel_wakeref_t wakeref;
+
+ GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+ with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+ vm->clear_range(vm, addr, BIT_ULL(size));
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+
+ kfree(order);
+
+ cleanup_freed_objects(vm->i915);
+ }
+
+ kfree(mock_vma_res);
+ return 0;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj, *on;
+ int ignored;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (!IS_ERR(vma))
+ ignored = i915_vma_unbind_unlocked(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int fill_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ struct drm_i915_gem_object *obj;
+ const unsigned int min_alignment =
+ i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
+ const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
+ unsigned long npages, prime, flags;
+ struct i915_vma *vma;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Try binding many VMA working inwards from either edge */
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_prime_number_from(prime, 2, max_step) {
+ for (npages = 1; npages <= max_pages; npages *= prime) {
+ const u64 full_size = npages << PAGE_SHIFT;
+ const struct {
+ const char *name;
+ u64 offset;
+ int step;
+ } phases[] = {
+ { "top-down", hole_end, -1, },
+ { "bottom-up", hole_start, 1, },
+ { }
+ }, *p;
+
+ obj = fake_dma_object(vm->i915, full_size);
+ if (IS_ERR(obj))
+ break;
+
+ list_add(&obj->st_link, &objects);
+
+ /* Align differing sized objects against the edges, and
+ * check we don't walk off into the void when binding
+ * them into the GTT.
+ */
+ for (p = phases; p->name; p++) {
+ u64 offset;
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + aligned_size)
+ break;
+ offset -= aligned_size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + aligned_size > hole_end)
+ break;
+ offset += aligned_size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + aligned_size)
+ break;
+ offset -= aligned_size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + aligned_size > hole_end)
+ break;
+ offset += aligned_size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + aligned_size)
+ break;
+ offset -= aligned_size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + aligned_size > hole_end)
+ break;
+ offset += aligned_size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ u64 aligned_size = round_up(obj->base.size,
+ min_alignment);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + aligned_size)
+ break;
+ offset -= aligned_size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + aligned_size > hole_end)
+ break;
+ offset += aligned_size;
+ }
+ }
+ }
+
+ if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
+ __func__, npages, prime)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ cleanup_freed_objects(vm->i915);
+ }
+
+ return 0;
+
+err:
+ close_object_list(&objects, vm);
+ return err;
+}
+
+static int walk_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+ unsigned long min_alignment;
+ unsigned long flags;
+ u64 size;
+
+ /* Try binding a single VMA in different positions within the hole */
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
+ for_each_prime_number_from(size, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u64 addr;
+ int err = 0;
+
+ obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ break;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ for (addr = hole_start;
+ addr + obj->base.size < hole_end;
+ addr += round_up(obj->base.size, min_alignment)) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
+ __func__, addr, vma->size,
+ hole_start, hole_end, err);
+ goto err_put;
+ }
+ i915_vma_unpin(vma);
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("%s unbind failed at %llx + %llx with err=%d\n",
+ __func__, addr, vma->size, err);
+ goto err_put;
+ }
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ if (igt_timeout(end_time,
+ "%s timed out at %llx\n",
+ __func__, addr)) {
+ err = -EINTR;
+ goto err_put;
+ }
+ }
+
+err_put:
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+
+ cleanup_freed_objects(vm->i915);
+ }
+
+ return 0;
+}
+
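+/*
+ * Power-of-two ("pot") boundaries are where page-table structures
+ * split, so straddling each such boundary with a pair of pages is
+ * intended to exercise insertions that span adjacent page-table
+ * entries at every level of the tree.
+ */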
+static int pot_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int min_alignment;
+ unsigned long flags;
+ unsigned int pot;
+ int err = 0;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
+ obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ /* Insert a pair of pages across every pot boundary within the hole */
+ for (pot = fls64(hole_end - 1) - 1;
+ pot > ilog2(2 * min_alignment);
+ pot--) {
+ u64 step = BIT_ULL(pot);
+ u64 addr;
+
+ for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
+ hole_end > addr && hole_end - addr >= 2 * min_alignment;
+ addr += step) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr,
+ hole_start, hole_end,
+ err);
+ goto err_obj;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ err = -EINVAL;
+ goto err_obj;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ GEM_BUG_ON(err);
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, pot, fls64(hole_end - 1) - 1)) {
+ err = -EINTR;
+ goto err_obj;
+ }
+ }
+
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int drunk_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ I915_RND_STATE(prng);
+ unsigned int min_alignment;
+ unsigned int size;
+ unsigned long flags;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ struct i915_vma *vma;
+ u64 hole_size, aligned_size;
+ int err = -ENODEV;
+
+ aligned_size = max_t(u32, ilog2(min_alignment), size);
+ hole_size = (hole_end - hole_start) >> aligned_size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
+ do {
+ order = i915_random_order(count, &prng);
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count)
+ return -ENOMEM;
+ GEM_BUG_ON(!order);
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(vm->i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ GEM_BUG_ON(vma->size != BIT_ULL(size));
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr, BIT_ULL(size),
+ hole_start, hole_end,
+ err);
+ goto err_obj;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, BIT_ULL(size));
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ err = -EINVAL;
+ goto err_obj;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ GEM_BUG_ON(err);
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, n, count)) {
+ err = -EINTR;
+ goto err_obj;
+ }
+ }
+
+err_obj:
+ i915_gem_object_put(obj);
+ kfree(order);
+ if (err)
+ return err;
+
+ cleanup_freed_objects(vm->i915);
+ }
+
+ return 0;
+}
+
+static int __shrink_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ unsigned int min_alignment;
+ unsigned int order = 12;
+ LIST_HEAD(objects);
+ int err = 0;
+ u64 addr;
+
+ min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (addr = hole_start; addr < hole_end; ) {
+ struct i915_vma *vma;
+ u64 size = BIT_ULL(order++);
+
+ size = min(size, hole_end - addr);
+ obj = fake_dma_object(vm->i915, size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ GEM_BUG_ON(vma->size != size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__, addr, size, hole_start, hole_end, err);
+ break;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ err = -EINVAL;
+ break;
+ }
+
+ i915_vma_unpin(vma);
+ addr += round_up(size, min_alignment);
+
+ /*
+ * Since we are injecting allocation faults at random intervals,
+ * wait for this allocation to complete before we change the
+ * faultinjection.
+ */
+ err = i915_vma_sync(vma);
+ if (err)
+ break;
+
+ if (igt_timeout(end_time,
+				"%s timed out at offset %llx [%llx - %llx]\n",
+ __func__, addr, hole_start, hole_end)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ cleanup_freed_objects(vm->i915);
+ return err;
+}
+
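+/*
+ * Run __shrink_hole under allocation-fault injection: with the fault
+ * probability effectively pinned to "always" and the interval swept
+ * over successive primes, failures land at ever-different points in
+ * the page-table allocation paths, exercising the error unwind at
+ * each step.
+ */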
+static int shrink_hole(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ unsigned long prime;
+ int err;
+
+ vm->fault_attr.probability = 999;
+ atomic_set(&vm->fault_attr.times, -1);
+
+ for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
+ vm->fault_attr.interval = prime;
+ err = __shrink_hole(vm, hole_start, hole_end, end_time);
+ if (err)
+ break;
+ }
+
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+
+ return err;
+}
+
+static int shrink_boom(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ unsigned int sizes[] = { SZ_2M, SZ_1G };
+ struct drm_i915_gem_object *purge;
+ struct drm_i915_gem_object *explode;
+ int err;
+ int i;
+
+ /*
+ * Catch the case which shrink_hole seems to miss. The setup here
+ * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
+	 * ensuring that all vma associated with the respective pd/pdp are
+ * unpinned at the time.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int size = sizes[i];
+ struct i915_vma *vma;
+
+ purge = fake_dma_object(vm->i915, size);
+ if (IS_ERR(purge))
+ return PTR_ERR(purge);
+
+ vma = i915_vma_instance(purge, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_purge;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto err_purge;
+
+ /* Should now be ripe for purging */
+ i915_vma_unpin(vma);
+
+ explode = fake_dma_object(vm->i915, size);
+ if (IS_ERR(explode)) {
+ err = PTR_ERR(explode);
+ goto err_purge;
+ }
+
+ vm->fault_attr.probability = 100;
+ vm->fault_attr.interval = 1;
+ atomic_set(&vm->fault_attr.times, -1);
+
+ vma = i915_vma_instance(explode, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_explode;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags | size);
+ if (err)
+ goto err_explode;
+
+ i915_vma_unpin(vma);
+
+ i915_gem_object_put(purge);
+ i915_gem_object_put(explode);
+
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ cleanup_freed_objects(vm->i915);
+ }
+
+ return 0;
+
+err_explode:
+ i915_gem_object_put(explode);
+err_purge:
+ i915_gem_object_put(purge);
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ return err;
+}
+
+static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
+ u64 addr, u64 size, unsigned long flags)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err = 0;
+ u64 expected_vma_size, expected_node_size;
+ bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
+ mr->type == INTEL_MEMORY_STOLEN_LOCAL;
+
+ obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj)) {
+ /* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
+ if (PTR_ERR(obj) == -ENODEV && is_stolen)
+ return 0;
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err)
+ goto err_put;
+ i915_vma_unpin(vma);
+
+ if (!drm_mm_node_allocated(&vma->node)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
+ expected_node_size = expected_vma_size;
+
+ if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
+ /*
+ * The compact-pt should expand the lmem node to 2MB for the ppGTT;
+ * in all other cases we should only expect 64K.
+ */
+ expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
+ if (NEEDS_COMPACT_PT(vm->i915) && !i915_is_ggtt(vm))
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ else
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
+ }
+
+ if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
+ err = i915_vma_unbind_unlocked(vma);
+ err = -EBADSLT;
+ goto err_put;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err)
+ goto err_put;
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+err_put:
+ i915_gem_object_put(obj);
+ cleanup_freed_objects(vm->i915);
+ return err;
+}
+
+static int misaligned_pin(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct intel_memory_region *mr;
+ enum intel_region_id id;
+ unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ int err = 0;
+ u64 hole_size = hole_end - hole_start;
+
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_memory_region(mr, vm->i915, id) {
+ u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
+ u64 size = min_alignment;
+ u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
+
+ /* avoid -ENOSPC on very small hole setups */
+ if (hole_size < 3 * min_alignment)
+ continue;
+
+ /* we can't test < 4k alignment due to flags being encoded in lower bits */
+ if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
+ err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
+ /* misaligned should error with -EINVAL */
+ if (!err)
+ err = -EBADSLT;
+ if (err != -EINVAL)
+ return err;
+ }
+
+ /* test for vma->size expansion to min page size */
+ err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
+ if (err)
+ return err;
+
+ /* test for intermediate size not expanding vma->size for large alignments */
+ err = misaligned_case(vm, mr, addr, size / 2, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int exercise_ppgtt(struct drm_i915_private *dev_priv,
+ int (*func)(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_ppgtt *ppgtt;
+ IGT_TIMEOUT(end_time);
+ struct file *file;
+ int err;
+
+ if (!HAS_FULL_PPGTT(dev_priv))
+ return 0;
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_free;
+ }
+ GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+ assert_vm_alive(&ppgtt->vm);
+
+ err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
+
+ i915_vm_put(&ppgtt->vm);
+
+out_free:
+ fput(file);
+ return err;
+}
+
+static int igt_ppgtt_fill(void *arg)
+{
+ return exercise_ppgtt(arg, fill_hole);
+}
+
+static int igt_ppgtt_walk(void *arg)
+{
+ return exercise_ppgtt(arg, walk_hole);
+}
+
+static int igt_ppgtt_pot(void *arg)
+{
+ return exercise_ppgtt(arg, pot_hole);
+}
+
+static int igt_ppgtt_drunk(void *arg)
+{
+ return exercise_ppgtt(arg, drunk_hole);
+}
+
+static int igt_ppgtt_lowlevel(void *arg)
+{
+ return exercise_ppgtt(arg, lowlevel_hole);
+}
+
+static int igt_ppgtt_shrink(void *arg)
+{
+ return exercise_ppgtt(arg, shrink_hole);
+}
+
+static int igt_ppgtt_shrink_boom(void *arg)
+{
+ return exercise_ppgtt(arg, shrink_boom);
+}
+
+static int igt_ppgtt_misaligned_pin(void *arg)
+{
+ return exercise_ppgtt(arg, misaligned_pin);
+}
+
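+/*
+ * Comparator for list_sort(): order the GGTT hole list by ascending start
+ * address so that exercise_ggtt() below can walk the holes in a stable order.
+ */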
+static int sort_holes(void *priv, const struct list_head *A,
+ const struct list_head *B)
+{
+ struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
+ struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
+
+ if (a->start < b->start)
+ return -1;
+ else
+ return 1;
+}
+
+static int exercise_ggtt(struct drm_i915_private *i915,
+ int (*func)(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ u64 hole_start, hole_end, last = 0;
+ struct drm_mm_node *node;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+restart:
+ list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
+ if (hole_start < last)
+ continue;
+
+ if (ggtt->vm.mm.color_adjust)
+ ggtt->vm.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
+ if (hole_start >= hole_end)
+ continue;
+
+ err = func(&ggtt->vm, hole_start, hole_end, end_time);
+ if (err)
+ break;
+
+ /* As we have manipulated the drm_mm, the list may be corrupt */
+ last = hole_end;
+ goto restart;
+ }
+
+ return err;
+}
+
+static int igt_ggtt_fill(void *arg)
+{
+ return exercise_ggtt(arg, fill_hole);
+}
+
+static int igt_ggtt_walk(void *arg)
+{
+ return exercise_ggtt(arg, walk_hole);
+}
+
+static int igt_ggtt_pot(void *arg)
+{
+ return exercise_ggtt(arg, pot_hole);
+}
+
+static int igt_ggtt_drunk(void *arg)
+{
+ return exercise_ggtt(arg, drunk_hole);
+}
+
+static int igt_ggtt_lowlevel(void *arg)
+{
+ return exercise_ggtt(arg, lowlevel_hole);
+}
+
+static int igt_ggtt_misaligned_pin(void *arg)
+{
+ return exercise_ggtt(arg, misaligned_pin);
+}
+
+static int igt_ggtt_page(void *arg)
+{
+ const unsigned int count = PAGE_SIZE/sizeof(u32);
+ I915_RND_STATE(prng);
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct drm_mm_node tmp;
+ unsigned int *order, n;
+ int err;
+
+ if (!i915_ggtt_has_aperture(ggtt))
+ return 0;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto out_free;
+
+ memset(&tmp, 0, sizeof(tmp));
+ mutex_lock(&ggtt->vm.mutex);
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
+ count * PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err)
+ goto out_unpin;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + n * PAGE_SIZE;
+
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+ }
+
+ order = i915_random_order(count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
+
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+ iowrite32(n, vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+ }
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+ u32 val;
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+ val = ioread32(vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+
+ if (val != n) {
+ pr_err("insert page failed: found %d, expected %d\n",
+ val, n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(order);
+out_remove:
+ ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&ggtt->vm.mutex);
+ drm_mm_remove_node(&tmp);
+ mutex_unlock(&ggtt->vm.mutex);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_free:
+ i915_gem_object_put(obj);
+ return err;
+}
+
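+/*
+ * Fake the bookkeeping of a real binding for the mock GTT tests: pin the
+ * backing pages, mark the vma's pages as active and move it onto the bound
+ * list, without programming any PTEs.
+ */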
+static void track_vma_bind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ __i915_gem_object_pin_pages(obj);
+
+ GEM_BUG_ON(atomic_read(&vma->pages_count));
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+ __i915_gem_object_pin_pages(obj);
+ vma->pages = obj->mm.pages;
+ vma->resource->bi.pages = vma->pages;
+
+ mutex_lock(&vma->vm->mutex);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ mutex_unlock(&vma->vm->mutex);
+}
+
+static int exercise_mock(struct drm_i915_private *i915,
+ int (*func)(struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ const u64 limit = totalram_pages() << PAGE_SHIFT;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx)
+ return -ENOMEM;
+
+ vm = i915_gem_context_get_eb_vm(ctx);
+ err = func(vm, 0, min(vm->total, limit), end_time);
+ i915_vm_put(vm);
+
+ mock_context_close(ctx);
+ return err;
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, fill_hole);
+}
+
+static int igt_mock_walk(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, walk_hole);
+}
+
+static int igt_mock_pot(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, pot_hole);
+}
+
+static int igt_mock_drunk(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, drunk_hole);
+}
+
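+/*
+ * Wrap i915_gem_gtt_reserve() with a manually allocated vma resource, since
+ * these tests drive the drm_mm directly rather than going through the normal
+ * vma binding path.
+ */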
+static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
+ offset,
+ obj->cache_level,
+ 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
+static int igt_gtt_reserve(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ struct drm_i915_gem_object *obj, *on;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ u64 total;
+ int err = -ENODEV;
+
+ /* i915_gem_gtt_reserve() tries to reserve the precise range
+ * for the node, and evicts if it has to. So our test checks that
+ * it can give us the requested space and prevent overlaps.
+ */
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = reserve_gtt_with_resource(vma, total);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Now we start forcing evictions */
+ for (total = I915_GTT_PAGE_SIZE;
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = reserve_gtt_with_resource(vma, total);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then try at random */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ offset = igt_random_offset(&prng,
+ 0, ggtt->vm.total,
+ 2 * I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+
+ err = reserve_gtt_with_resource(vma, offset);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
+ vma->node.start, vma->node.size,
+ offset, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+static int insert_gtt_with_resource(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ struct i915_vma_resource *vma_res;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res))
+ return PTR_ERR(vma_res);
+
+ mutex_lock(&vm->mutex);
+ err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
+ obj->cache_level, 0, vm->total, 0);
+ if (!err) {
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ vma->resource = vma_res;
+ } else {
+ kfree(vma_res);
+ }
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
+static int igt_gtt_insert(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ struct drm_i915_gem_object *obj, *on;
+ struct drm_mm_node tmp = {};
+ const struct invalid_insert {
+ u64 size;
+ u64 alignment;
+ u64 start, end;
+ } invalid_insert[] = {
+ {
+ ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, ggtt->vm.total,
+ },
+ {
+ 2*I915_GTT_PAGE_SIZE, 0,
+ 0, I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)I915_GTT_PAGE_SIZE, 0,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
+ I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
+ },
+ {}
+ }, *ii;
+ LIST_HEAD(objects);
+ u64 total;
+ int err = -ENODEV;
+
+ /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+ * to the node, evicting if required.
+ */
+
+ /* Check a couple of obviously invalid requests */
+ for (ii = invalid_insert; ii->size; ii++) {
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
+ ii->size, ii->alignment,
+ I915_COLOR_UNEVICTABLE,
+ ii->start, ii->end,
+ 0);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err != -ENOSPC) {
+ pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
+ ii->size, ii->alignment, ii->start, ii->end,
+ err);
+ return -EINVAL;
+ }
+ }
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = insert_gtt_with_resource(vma);
+ if (err == -ENOSPC) {
+ /* maxed out the GGTT space */
+ i915_gem_object_put(obj);
+ break;
+ }
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+ __i915_vma_pin(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+ list_for_each_entry(obj, &objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node)) {
+ pr_err("VMA was unexpectedly evicted!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ __i915_vma_unpin(vma);
+ }
+
+ /* If we then reinsert, we should find the same hole */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ offset = vma->node.start;
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ err = insert_gtt_with_resource(vma);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset) {
+ pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
+ offset, vma->node.start);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then force evictions */
+ for (total = 0;
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = insert_gtt_with_resource(vma);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
+ total, ggtt->vm.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+int i915_gem_gtt_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_drunk),
+ SUBTEST(igt_mock_walk),
+ SUBTEST(igt_mock_pot),
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_gtt_reserve),
+ SUBTEST(igt_gtt_insert),
+ };
+ struct drm_i915_private *i915;
+ struct intel_gt *gt;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
+ goto out_put;
+
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
+
+ mock_device_flush(i915);
+ i915_gem_drain_freed_objects(i915);
+ mock_fini_ggtt(gt->ggtt);
+
+out_put:
+ mock_destroy_device(i915);
+ return err;
+}
+
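+/*
+ * Submit an empty request on the context and wait for it, bounding the wait
+ * so that a dead engine fails the test with -EIO instead of hanging it.
+ */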
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ long timeout;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 5);
+ i915_request_put(rq);
+
+ return timeout < 0 ? -EIO : 0;
+}
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return err ? ERR_PTR(err) : rq;
+}
+
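+/*
+ * Each 64 byte chunk of the 'act' batch ends in a MI_BATCH_BUFFER_START
+ * pointing back at the start of the same chunk, i.e. a spinner. Dword 4 of
+ * the chunk is a MI_NOOP placeholder; end_spin() overwrites it with
+ * MI_BATCH_BUFFER_END to release the loop.
+ */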
+static u32 *spinner(u32 *batch, int i)
+{
+ return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+ *spinner(batch, i) = MI_BATCH_BUFFER_END;
+ wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+ const unsigned int count = PAGE_SIZE / 64;
+ const unsigned int chunk_size = count * PAGE_SIZE;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *bbe, *act, *out;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct i915_vma *vma;
+ I915_RND_STATE(prng);
+ struct file *file;
+ unsigned int i;
+ u32 *result;
+ u32 *batch;
+ int err = 0;
+
+ /*
+ * Our mission here is to fool the hardware into executing something
+ * from scratch, as it has not seen the batch move (due to the missing
+ * TLB invalidate).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ vm = i915_gem_context_get_eb_vm(ctx);
+ if (i915_is_ggtt(vm))
+ goto out_vm;
+
+ /* Create two pages; a dummy with which we prefill the TLB, and the intended target */
+ bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(bbe)) {
+ err = PTR_ERR(bbe);
+ goto out_vm;
+ }
+
+ batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_bbe;
+ }
+ memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+ i915_gem_object_flush_map(bbe);
+ i915_gem_object_unpin_map(bbe);
+
+ act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
+ goto out_put_bbe;
+ }
+
+ /* Track the execution of each request by writing into a different slot */
+ batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_act;
+ }
+ for (i = 0; i < count; i++) {
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ cs[0] = MI_STORE_DWORD_IMM_GEN4;
+ if (GRAPHICS_VER(i915) >= 8) {
+ cs[1] = lower_32_bits(addr);
+ cs[2] = upper_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START_GEN8;
+ } else {
+ cs[1] = 0;
+ cs[2] = lower_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START;
+ }
+ }
+
+ out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(out)) {
+ err = PTR_ERR(out);
+ goto out_put_batch;
+ }
+ i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(out, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put_out;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ PIN_USER |
+ PIN_OFFSET_FIXED |
+ (vm->total - PAGE_SIZE));
+ if (err)
+ goto out_put_out;
+ GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+ result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
+ if (IS_ERR(result)) {
+ err = PTR_ERR(result);
+ goto out_put_out;
+ }
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ IGT_TIMEOUT(end_time);
+ unsigned long pass = 0;
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ struct i915_vm_pt_stash stash = {};
+ struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma_resource *vma_res;
+ u64 offset;
+
+ offset = igt_random_offset(&prng,
+ 0, vm->total - PAGE_SIZE,
+ chunk_size, PAGE_SIZE);
+
+ memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ vma = i915_vma_instance(bbe, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ i915_gem_object_lock(bbe, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(bbe);
+ if (err)
+ goto end;
+
+ vma_res = i915_vma_resource_alloc();
+ if (IS_ERR(vma_res)) {
+ i915_vma_put_pages(vma);
+ err = PTR_ERR(vma_res);
+ goto end;
+ }
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(vm, &ww);
+ if (err)
+ goto end_ww;
+
+ err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
+ if (err)
+ goto end_ww;
+
+ err = i915_vm_map_pt_stash(vm, &stash);
+ if (!err)
+ vm->allocate_va_range(vm, &stash, offset, chunk_size);
+ i915_vm_free_pt_stash(vm, &stash);
+end_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err) {
+ kfree(vma_res);
+ goto end;
+ }
+
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ /* Prime the TLB with the dummy pages */
+ for (i = 0; i < count; i++) {
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
+ 0);
+
+ rq = submit_batch(ce, vma_res->start);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
+ goto end;
+ }
+ i915_request_put(rq);
+ }
+ i915_vma_resource_fini(vma_res);
+ i915_vma_put_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: dummy setup timed out\n",
+ ce->engine->name);
+ kfree(vma_res);
+ goto end;
+ }
+
+ vma = i915_vma_instance(act, vm, NULL);
+ if (IS_ERR(vma)) {
+ kfree(vma_res);
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ i915_gem_object_lock(act, NULL);
+ err = i915_vma_get_pages(vma);
+ i915_gem_object_unlock(act);
+ if (err) {
+ kfree(vma_res);
+ goto end;
+ }
+
+ i915_vma_resource_init_from_vma(vma_res, vma);
+ /* Replace the TLB with target batches */
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr;
+
+ vma_res->start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
+
+ addr = vma_res->start + i * 64;
+ cs[4] = MI_NOOP;
+ cs[6] = lower_32_bits(addr);
+ cs[7] = upper_32_bits(addr);
+ wmb();
+
+ rq = submit_batch(ce, addr);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
+ goto end;
+ }
+
+ /* Wait until the context chain has started */
+ if (i == 0) {
+ while (READ_ONCE(result[i]) &&
+ !i915_request_completed(rq))
+ cond_resched();
+ } else {
+ end_spin(batch, i - 1);
+ }
+
+ i915_request_put(rq);
+ }
+ end_spin(batch, count - 1);
+
+ i915_vma_resource_fini(vma_res);
+ kfree(vma_res);
+ i915_vma_put_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: writes timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (result[i] != i) {
+ pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+ ce->engine->name, pass,
+ offset, i, result[i], i);
+ err = -EINVAL;
+ goto end;
+ }
+ }
+
+ vm->clear_range(vm, offset, chunk_size);
+ pass++;
+ }
+ }
+end:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_object_unpin_map(out);
+out_put_out:
+ i915_gem_object_put(out);
+out_put_batch:
+ i915_gem_object_unpin_map(act);
+out_put_act:
+ i915_gem_object_put(act);
+out_put_bbe:
+ i915_gem_object_put(bbe);
+out_vm:
+ i915_vm_put(vm);
+out_unlock:
+ fput(file);
+ return err;
+}
+
+int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ppgtt_alloc),
+ SUBTEST(igt_ppgtt_lowlevel),
+ SUBTEST(igt_ppgtt_drunk),
+ SUBTEST(igt_ppgtt_walk),
+ SUBTEST(igt_ppgtt_pot),
+ SUBTEST(igt_ppgtt_fill),
+ SUBTEST(igt_ppgtt_shrink),
+ SUBTEST(igt_ppgtt_shrink_boom),
+ SUBTEST(igt_ppgtt_misaligned_pin),
+ SUBTEST(igt_ggtt_lowlevel),
+ SUBTEST(igt_ggtt_drunk),
+ SUBTEST(igt_ggtt_walk),
+ SUBTEST(igt_ggtt_pot),
+ SUBTEST(igt_ggtt_fill),
+ SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_ggtt_misaligned_pin),
+ SUBTEST(igt_cs_tlb),
+ };
+
+ GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
new file mode 100644
index 000000000..aaf8a380e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
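+
+/*
+ * For illustration, a consumer re-includes this file with its own definition
+ * of selftest(). A sketch of the x-macro pattern follows (the real expansions
+ * live elsewhere in the driver, so treat the names here as examples only):
+ *
+ *	#define selftest(name, function) live_##name,
+ *	enum live_selftest_id {
+ *	#include "i915_live_selftests.h"
+ *	};
+ *	#undef selftest
+ */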
+selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
+selftest(uncore, intel_uncore_live_selftests)
+selftest(workarounds, intel_workarounds_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
+selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
+selftest(gt_pm, intel_gt_pm_live_selftests)
+selftest(gt_heartbeat, intel_heartbeat_live_selftests)
+selftest(requests, i915_request_live_selftests)
+selftest(migrate, intel_migrate_live_selftests)
+selftest(active, i915_active_live_selftests)
+selftest(objects, i915_gem_object_live_selftests)
+selftest(mman, i915_gem_mman_live_selftests)
+selftest(dmabuf, i915_gem_dmabuf_live_selftests)
+selftest(vma, i915_vma_live_selftests)
+selftest(coherency, i915_gem_coherency_live_selftests)
+selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(gem, i915_gem_live_selftests)
+selftest(evict, i915_gem_evict_live_selftests)
+selftest(hugepages, i915_gem_huge_page_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
+selftest(client, i915_gem_client_blt_live_selftests)
+selftest(gem_migrate, i915_gem_migrate_live_selftests)
+selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
+selftest(hangcheck, intel_hangcheck_live_selftests)
+selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
+selftest(perf, i915_perf_live_selftests)
+selftest(slpc, intel_slpc_live_selftests)
+selftest(guc, intel_guc_live_selftests)
+selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
+selftest(guc_hang, intel_guc_hang_check)
+/* Here be dragons: keep last to run last! */
+selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
new file mode 100644
index 000000000..0c22e0fc9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
+selftest(fence, i915_sw_fence_mock_selftests)
+selftest(scatterlist, scatterlist_mock_selftests)
+selftest(syncmap, i915_syncmap_mock_selftests)
+selftest(uncore, intel_uncore_mock_selftests)
+selftest(ring, intel_ring_mock_selftests)
+selftest(engine, intel_engine_cs_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
+selftest(requests, i915_request_mock_selftests)
+selftest(objects, i915_gem_object_mock_selftests)
+selftest(phys, i915_gem_phys_mock_selftests)
+selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
+selftest(vma, i915_vma_mock_selftests)
+selftest(evict, i915_gem_evict_mock_selftests)
+selftest(gtt, i915_gem_gtt_mock_selftests)
+selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
new file mode 100644
index 000000000..429c6d73b
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -0,0 +1,439 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kref.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"
+
+static int
+alloc_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config;
+
+ oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+ if (!oa_config)
+ return -ENOMEM;
+
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
+
+ strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));
+
+ mutex_lock(&perf->metrics_lock);
+
+ oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
+ if (oa_config->id < 0) {
+ mutex_unlock(&perf->metrics_lock);
+ i915_oa_config_put(oa_config);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return 0;
+}
+
+static void
+destroy_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = tmp;
+ break;
+ }
+ }
+
+ if (oa_config)
+ idr_remove(&perf->metrics_idr, oa_config->id);
+
+ mutex_unlock(&perf->metrics_lock);
+
+ if (oa_config)
+ i915_oa_config_put(oa_config);
+}
+
+static struct i915_oa_config *
+get_empty_config(struct i915_perf *perf)
+{
+ struct i915_oa_config *oa_config = NULL, *tmp;
+ int id;
+
+ mutex_lock(&perf->metrics_lock);
+
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+
+ mutex_unlock(&perf->metrics_lock);
+
+ return oa_config;
+}
+
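+/*
+ * Open a minimal OA stream on the render engine, reusing the empty test
+ * config created above; returns NULL on any failure so callers can bail.
+ */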
+static struct i915_perf_stream *
+test_stream(struct i915_perf *perf)
+{
+ struct drm_i915_perf_open_param param = {};
+ struct i915_oa_config *oa_config = get_empty_config(perf);
+ struct perf_open_properties props = {
+ .engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0),
+ .sample_flags = SAMPLE_OA_REPORT,
+ .oa_format = GRAPHICS_VER(perf->i915) == 12 ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
+ };
+ struct i915_perf_stream *stream;
+
+ if (!oa_config)
+ return NULL;
+
+ props.metrics_set = oa_config->id;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream) {
+ i915_oa_config_put(oa_config);
+ return NULL;
+ }
+
+ stream->perf = perf;
+
+ mutex_lock(&perf->lock);
+ if (i915_oa_stream_init(stream, &param, &props)) {
+ kfree(stream);
+ stream = NULL;
+ }
+ mutex_unlock(&perf->lock);
+
+ i915_oa_config_put(oa_config);
+
+ return stream;
+}
+
+static void stream_destroy(struct i915_perf_stream *stream)
+{
+ struct i915_perf *perf = stream->perf;
+
+ mutex_lock(&perf->lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&perf->lock);
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+
+ /* Quick check we can create a perf stream */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -EINVAL;
+
+ stream_destroy(stream);
+ return 0;
+}
+
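+/*
+ * Emit a PIPE_CONTROL that writes the GPU timestamp into the given dword
+ * slot of the engine's status page, for the CPU to poll below.
+ */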
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 *cs;
+ int len;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ len = 5;
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ len++;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(len);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_WRITE_TIMESTAMP;
+ *cs++ = slot * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static ktime_t poll_status(struct i915_request *rq, int slot)
+{
+ while (!intel_read_status_page(rq->engine, slot) &&
+ !i915_request_completed(rq))
+ cpu_relax();
+
+ return ktime_get();
+}
+
+static int live_noa_delay(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u64 expected;
+ u32 delay;
+ int err;
+ int i;
+
+ /* Check that the GPU delay matches expectations */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ expected = atomic64_read(&stream->perf->noa_programming_delay);
+
+ if (stream->engine->class != RENDER_CLASS) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < 4; i++)
+ intel_write_status_page(stream->engine, 0x100 + i, 0);
+
+ rq = intel_engine_create_kernel_request(stream->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ err = write_timestamp(rq, 0x100);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = write_timestamp(rq, 0x102);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ preempt_disable();
+ t0 = poll_status(rq, 0x100);
+ t1 = poll_status(rq, 0x102);
+ preempt_enable();
+
+ pr_info("CPU delay: %lluns, expected %lluns\n",
+ ktime_sub(t1, t0), expected);
+
+ delay = intel_read_status_page(stream->engine, 0x102);
+ delay -= intel_read_status_page(stream->engine, 0x100);
+ delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay);
+ pr_info("GPU delay: %uns, expected %lluns\n",
+ delay, expected);
+
+ if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
+ pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
+ delay / 1000,
+ div_u64(3 * expected, 4000),
+ div_u64(3 * expected, 2000));
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
+static int live_noa_gpr(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs, *store;
+ void *scratch;
+ u32 gpr0;
+ int err;
+ int i;
+
+ /* Check that the delay does not clobber user context state (GPR) */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0));
+
+ ce = intel_context_create(stream->engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ /* Poison the ce->vm scratch so we can detect writes that miss the GGTT gt->scratch */
+ scratch = __px_vaddr(ce->vm->scratch[0]);
+ memset(scratch, POISON_FREE, PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ i915_request_get(rq);
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+ }
+
+ /* Fill the 16 qword [32 dword] GPRs with a known unlikely value */
+ cs = intel_ring_begin(rq, 2 * 32 + 2);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(32);
+ for (i = 0; i < 32; i++) {
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Execute the GPU delay */
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ /* Read the GPRs back, using the pinned global HWSP for convenience */
+ store = memset32(rq->engine->status_page.addr + 512, 0, 32);
+ for (i = 0; i < 32; i++) {
+ u32 cmd;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto out_rq;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM;
+ if (GRAPHICS_VER(i915) >= 8)
+ cmd++;
+ cmd |= MI_USE_GGTT;
+
+ *cs++ = cmd;
+ *cs++ = gpr0 + i * sizeof(u32);
+ *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
+ offset_in_page(store) +
+ i * sizeof(u32);
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+ }
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
+ pr_err("noa_wait timed out\n");
+ intel_gt_set_wedged(stream->engine->gt);
+ err = -EIO;
+ goto out_rq;
+ }
+
+ /* Verify that the GPRs contain our expected values */
+ for (i = 0; i < 32; i++) {
+ if (store[i] == STACK_MAGIC)
+ continue;
+
+ pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n",
+ i, store[i], STACK_MAGIC);
+ err = -EINVAL;
+ }
+
+ /* Verify that the user's scratch page was not used for GPR storage */
+ if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
+ pr_err("Scratch page overwritten!\n");
+ igt_hexdump(scratch, 4096);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out:
+ stream_destroy(stream);
+ return err;
+}
+
+int i915_perf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_noa_delay),
+ SUBTEST(live_noa_gpr),
+ };
+ struct i915_perf *perf = &i915->perf;
+ int err;
+
+ if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
+ return 0;
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ err = alloc_empty_config(&i915->perf);
+ if (err)
+ return err;
+
+ err = i915_live_subtests(tests, i915);
+
+ destroy_empty_config(&i915->perf);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
new file mode 100644
index 000000000..058450d35
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef selftest
+#define selftest(x, y)
+#endif
+
+/*
+ * List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(request, i915_request_perf_selftests)
+selftest(migrate, intel_migrate_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
new file mode 100644
index 000000000..abdfadcf6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "i915_random.h"
+#include "i915_utils.h"
+
+u64 i915_prandom_u64_state(struct rnd_state *rnd)
+{
+ u64 x;
+
+ x = prandom_u32_state(rnd);
+ x <<= 32;
+ x |= prandom_u32_state(rnd);
+
+ return x;
+}
+
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+ struct rnd_state *state)
+{
+ char stack[128];
+
+ if (WARN_ON(elsz > sizeof(stack) || count > U32_MAX))
+ return;
+
+ if (!elsz || !count)
+ return;
+
+ /* Fisher-Yates shuffle courtesy of Knuth */
+ while (--count) {
+ size_t swp;
+
+ swp = i915_prandom_u32_max_state(count + 1, state);
+ if (swp == count)
+ continue;
+
+ memcpy(stack, arr + count * elsz, elsz);
+ memcpy(arr + count * elsz, arr + swp * elsz, elsz);
+ memcpy(arr + swp * elsz, stack, elsz);
+ }
+}
+
+void i915_random_reorder(unsigned int *order, unsigned int count,
+ struct rnd_state *state)
+{
+ i915_prandom_shuffle(order, sizeof(*order), count, state);
+}
+
+unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
+{
+ unsigned int *order, i;
+
+ order = kmalloc_array(count, sizeof(*order),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!order)
+ return order;
+
+ for (i = 0; i < count; i++)
+ order[i] = i;
+
+ i915_random_reorder(order, count, state);
+ return order;
+}
+
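+/*
+ * Pick a random, align-rounded offset for a block of @len bytes within
+ * [@start, @end); the BUG_ONs assert that the caller supplied a range which
+ * can hold such a block.
+ */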
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align)
+{
+ u64 range, addr;
+
+ BUG_ON(range_overflows(start, len, end));
+ BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ addr = i915_prandom_u64_state(state);
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
new file mode 100644
index 000000000..05364eca2
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_SELFTESTS_RANDOM_H__
+#define __I915_SELFTESTS_RANDOM_H__
+
+#include <linux/math64.h>
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#define I915_RND_STATE_INITIALIZER(x) ({ \
+ struct rnd_state state__; \
+ prandom_seed_state(&state__, (x)); \
+ state__; \
+})
+
+#define I915_RND_STATE(name__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed)
+
+#define I915_RND_SUBSTATE(name__, parent__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(prandom_u32_state(&(parent__)))
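+
+/*
+ * Typical usage (an illustrative sketch): seed one generator per test and
+ * draw bounded values from it, e.g.
+ *
+ *	I915_RND_STATE(prng);
+ *	u32 pick = i915_prandom_u32_max_state(count, &prng);
+ *
+ * where pick is then uniformly distributed in [0, count).
+ */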
+
+u64 i915_prandom_u64_state(struct rnd_state *rnd);
+
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro));
+}
+
+unsigned int *i915_random_order(unsigned int count,
+ struct rnd_state *state);
+void i915_random_reorder(unsigned int *order,
+ unsigned int count,
+ struct rnd_state *state);
+
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+ struct rnd_state *state);
+
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align);
+
+#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
new file mode 100644
index 000000000..a46350c37
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -0,0 +1,3288 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
+#include <linux/sort.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/mock_context.h"
+
+#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/selftest_engine_heartbeat.h"
+
+#include "i915_random.h"
+#include "i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_live_test.h"
+#include "igt_spinner.h"
+#include "lib_sw_fence.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ return count;
+}
+
+static struct intel_engine_cs *rcs0(struct drm_i915_private *i915)
+{
+ return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0);
+}
+
+static int igt_add_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_request *request;
+
+ /* Basic preliminary test to create a request and let it loose! */
+
+ request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
+ if (!request)
+ return -ENOMEM;
+
+ i915_request_add(request);
+
+ return 0;
+}
+
+static int igt_wait_request(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct i915_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, then wait upon it */
+
+ request = mock_request(rcs0(i915)->kernel_context, T);
+ if (!request)
+ return -ENOMEM;
+
+ i915_request_get(request);
+
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
+ goto out_request;
+ }
+
+ if (i915_request_wait(request, 0, T) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout before submit!)\n");
+ goto out_request;
+ }
+
+ if (i915_request_completed(request)) {
+ pr_err("request completed before submit!!\n");
+ goto out_request;
+ }
+
+ i915_request_add(request);
+
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
+ goto out_request;
+ }
+
+ if (i915_request_completed(request)) {
+ pr_err("request completed immediately!\n");
+ goto out_request;
+ }
+
+ if (i915_request_wait(request, 0, T / 2) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout!)\n");
+ goto out_request;
+ }
+
+ if (i915_request_wait(request, 0, T) == -ETIME) {
+ pr_err("request wait timed out!\n");
+ goto out_request;
+ }
+
+ if (!i915_request_completed(request)) {
+ pr_err("request not complete after waiting!\n");
+ goto out_request;
+ }
+
+ if (i915_request_wait(request, 0, T) == -ETIME) {
+ pr_err("request wait timed out when already complete!\n");
+ goto out_request;
+ }
+
+ err = 0;
+out_request:
+ i915_request_put(request);
+ mock_device_flush(i915);
+ return err;
+}
+
+static int igt_fence_wait(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct i915_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, treat it as a fence and wait upon it */
+
+ request = mock_request(rcs0(i915)->kernel_context, T);
+ if (!request)
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
+ pr_err("fence wait success before submit (expected timeout)!\n");
+ goto out;
+ }
+
+ i915_request_add(request);
+
+ if (dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence signaled immediately!\n");
+ goto out;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
+ pr_err("fence wait success after submit (expected timeout)!\n");
+ goto out;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out (expected success)!\n");
+ goto out;
+ }
+
+ if (!dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence unsignaled after waiting!\n");
+ goto out;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out when complete (expected success)!\n");
+ goto out;
+ }
+
+ err = 0;
+out:
+ mock_device_flush(i915);
+ return err;
+}
+
+static int igt_request_rewind(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_request *request, *vip;
+ struct i915_gem_context *ctx[2];
+ struct intel_context *ce;
+ int err = -EINVAL;
+
+ ctx[0] = mock_context(i915, "A");
+ if (!ctx[0]) {
+ err = -ENOMEM;
+ goto err_ctx_0;
+ }
+
+ ce = i915_gem_context_get_engine(ctx[0], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ request = mock_request(ce, 2 * HZ);
+ intel_context_put(ce);
+ if (!request) {
+ err = -ENOMEM;
+ goto err_context_0;
+ }
+
+ i915_request_get(request);
+ i915_request_add(request);
+
+ ctx[1] = mock_context(i915, "B");
+ if (!ctx[1]) {
+ err = -ENOMEM;
+ goto err_ctx_1;
+ }
+
+ ce = i915_gem_context_get_engine(ctx[1], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ vip = mock_request(ce, 0);
+ intel_context_put(ce);
+ if (!vip) {
+ err = -ENOMEM;
+ goto err_context_1;
+ }
+
+ /* Simulate preemption by manual reordering */
+ if (!mock_cancel_request(request)) {
+ pr_err("failed to cancel request (already executed)!\n");
+ i915_request_add(vip);
+ goto err_context_1;
+ }
+ i915_request_get(vip);
+ i915_request_add(vip);
+ rcu_read_lock();
+ request->engine->submit_request(request);
+ rcu_read_unlock();
+
+ if (i915_request_wait(vip, 0, HZ) == -ETIME) {
+ pr_err("timed out waiting for high priority request\n");
+ goto err;
+ }
+
+ if (i915_request_completed(request)) {
+ pr_err("low priority request already completed\n");
+ goto err;
+ }
+
+ err = 0;
+err:
+ i915_request_put(vip);
+err_context_1:
+ mock_context_close(ctx[1]);
+err_ctx_1:
+ i915_request_put(request);
+err_context_0:
+ mock_context_close(ctx[0]);
+err_ctx_0:
+ mock_device_flush(i915);
+ return err;
+}
+
+struct smoketest {
+ struct intel_engine_cs *engine;
+ struct i915_gem_context **contexts;
+ atomic_long_t num_waits, num_fences;
+ int ncontexts, max_batch;
+ struct i915_request *(*request_alloc)(struct intel_context *ce);
+};
+
+static struct i915_request *
+__mock_request_alloc(struct intel_context *ce)
+{
+ return mock_request(ce, 0);
+}
+
+static struct i915_request *
+__live_request_alloc(struct intel_context *ce)
+{
+ return intel_context_create_request(ce);
+}
+
+struct smoke_thread {
+ struct kthread_worker *worker;
+ struct kthread_work work;
+ struct smoketest *t;
+ bool stop;
+ int result;
+};
+
+static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
+{
+ struct smoke_thread *thread = container_of(work, typeof(*thread), work);
+ struct smoketest *t = thread->t;
+ const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
+ const unsigned int total = 4 * t->ncontexts + 1;
+ unsigned int num_waits = 0, num_fences = 0;
+ struct i915_request **requests;
+ I915_RND_STATE(prng);
+ unsigned int *order;
+ int err = 0;
+
+ /*
+ * A very simple test to catch the most egregious of list handling bugs.
+ *
+ * At its heart, we simply create oodles of requests running across
+ * multiple kthreads and enable signaling on them, for the sole purpose
+ * of stressing our breadcrumb handling. The only inspection we do is
+ * that the fences were marked as signaled.
+ */
+
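+ /*
+ * Sizing note: total (4 * t->ncontexts + 1) comfortably exceeds any
+ * batch built below, since count <= max_batch <= t->ncontexts - 1.
+ */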
+ requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
+ if (!requests) {
+ thread->result = -ENOMEM;
+ return;
+ }
+
+ order = i915_random_order(total, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_requests;
+ }
+
+ while (!READ_ONCE(thread->stop)) {
+ struct i915_sw_fence *submit, *wait;
+ unsigned int n, count;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ err = -ENOMEM;
+ break;
+ }
+
+ wait = heap_fence_create(GFP_KERNEL);
+ if (!wait) {
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ err = -ENOMEM;
+ break;
+ }
+
+ i915_random_reorder(order, total, &prng);
+ count = 1 + i915_prandom_u32_max_state(max_batch, &prng);
+
+ for (n = 0; n < count; n++) {
+ struct i915_gem_context *ctx =
+ t->contexts[order[n] % t->ncontexts];
+ struct i915_request *rq;
+ struct intel_context *ce;
+
+ ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+ rq = t->request_alloc(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ count = n;
+ break;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+
+ requests[n] = i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (err >= 0)
+ err = i915_sw_fence_await_dma_fence(wait,
+ &rq->fence,
+ 0,
+ GFP_KERNEL);
+
+ if (err < 0) {
+ i915_request_put(rq);
+ count = n;
+ break;
+ }
+ }
+
+ i915_sw_fence_commit(submit);
+ i915_sw_fence_commit(wait);
+
+ if (!wait_event_timeout(wait->wait,
+ i915_sw_fence_done(wait),
+ 5 * HZ)) {
+ struct i915_request *rq = requests[count - 1];
+
+ pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+ atomic_read(&wait->pending), count,
+ rq->fence.context, rq->fence.seqno,
+ t->engine->name);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(t->engine->gt);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_sw_fence_wait(wait);
+ err = -EIO;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct i915_request *rq = requests[n];
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags)) {
+ pr_err("%llu:%llu was not signaled!\n",
+ rq->fence.context, rq->fence.seqno);
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+ }
+
+ heap_fence_put(wait);
+ heap_fence_put(submit);
+
+ if (err < 0)
+ break;
+
+ num_fences += count;
+ num_waits++;
+
+ cond_resched();
+ }
+
+ atomic_long_add(num_fences, &t->num_fences);
+ atomic_long_add(num_waits, &t->num_waits);
+
+ kfree(order);
+out_requests:
+ kfree(requests);
+ thread->result = err;
+}
+
+static int mock_breadcrumbs_smoketest(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct smoketest t = {
+ .engine = rcs0(i915),
+ .ncontexts = 1024,
+ .max_batch = 1024,
+ .request_alloc = __mock_request_alloc
+ };
+ unsigned int ncpus = num_online_cpus();
+ struct smoke_thread *threads;
+ unsigned int n;
+ int ret = 0;
+
+ /*
+ * Smoketest our breadcrumb/signal handling for requests across multiple
+ * threads. A very simple test to catch only the most egregious of bugs.
+ * See __igt_breadcrumbs_smoketest().
+ */
+
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ if (!t.contexts) {
+ ret = -ENOMEM;
+ goto out_threads;
+ }
+
+ for (n = 0; n < t.ncontexts; n++) {
+ t.contexts[n] = mock_context(t.engine->i915, "mock");
+ if (!t.contexts[n]) {
+ ret = -ENOMEM;
+ goto out_contexts;
+ }
+ }
+
+ for (n = 0; n < ncpus; n++) {
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker(0, "igt/%d", n);
+ if (IS_ERR(worker)) {
+ ret = PTR_ERR(worker);
+ ncpus = n;
+ break;
+ }
+
+ threads[n].worker = worker;
+ threads[n].t = &t;
+ threads[n].stop = false;
+ threads[n].result = 0;
+
+ kthread_init_work(&threads[n].work,
+ __igt_breadcrumbs_smoketest);
+ kthread_queue_work(worker, &threads[n].work);
+ }
+
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+ for (n = 0; n < ncpus; n++) {
+ int err;
+
+ WRITE_ONCE(threads[n].stop, true);
+ kthread_flush_work(&threads[n].work);
+ err = READ_ONCE(threads[n].result);
+ if (err < 0 && !ret)
+ ret = err;
+
+ kthread_destroy_worker(threads[n].worker);
+ }
+ pr_info("Completed %lu waits for %lu fence across %d cpus\n",
+ atomic_long_read(&t.num_waits),
+ atomic_long_read(&t.num_fences),
+ ncpus);
+
+out_contexts:
+ for (n = 0; n < t.ncontexts; n++) {
+ if (!t.contexts[n])
+ break;
+ mock_context_close(t.contexts[n]);
+ }
+ kfree(t.contexts);
+out_threads:
+ kfree(threads);
+ return ret;
+}
+
+int i915_request_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_add_request),
+ SUBTEST(igt_wait_request),
+ SUBTEST(igt_fence_wait),
+ SUBTEST(igt_request_rewind),
+ SUBTEST(mock_breadcrumbs_smoketest),
+ };
+ struct drm_i915_private *i915;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ err = i915_subtests(tests, i915);
+
+ mock_destroy_device(i915);
+
+ return err;
+}
+
+static int live_nop_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct igt_live_test t;
+ int err = -ENODEV;
+
+ /*
+ * Submit various sized batches of empty requests, to each engine
+ * (individually), and wait for the batch to complete. We can check
+ * the overhead of submitting requests to the hardware.
+ */
+
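+ /*
+ * Bookkeeping: times[0] holds the latency of a single request
+ * (prime == 1); times[1] is reused for each batch, so the final
+ * report compares the one-off cost against the amortised
+ * per-request cost.
+ */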
+ for_each_uabi_engine(engine, i915) {
+ unsigned long n, prime;
+ IGT_TIMEOUT(end_time);
+ ktime_t times[2] = {};
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ return err;
+
+ intel_engine_pm_get(engine);
+ for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ i915_request_put(request);
+ request = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+
+ /*
+ * This space is left intentionally blank.
+ *
+ * We do not actually want to perform any
+ * action with this request, we just want
+ * to measure the latency in allocation
+ * and submission of our breadcrumbs -
+ * ensuring that the bare request is sufficient
+ * for the system to work (i.e. proper HEAD
+ * tracking of the rings, interrupt handling,
+ * etc). It also gives us the lowest bounds
+ * for latency.
+ */
+
+ i915_request_get(request);
+ i915_request_add(request);
+ }
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ err = igt_live_test_end(&t);
+ if (err)
+ return err;
+
+ pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+ return err;
+}
+
+static int __cancel_inactive(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling inactive request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel inactive request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_active(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active request\n", engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel active request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_completed(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ igt_spinner_end(&spin);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ pr_debug("%s: Cancelling completed request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ if (rq->fence.error) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+/*
+ * Test to prove that a non-preemptible request can be cancelled and that a
+ * subsequent request on the same context can successfully complete after the
+ * cancellation.
+ *
+ * The methodology is to create a non-preemptible request and submit it, wait
+ * for the spinner to start, create a NOP request and submit it, cancel the
+ * spinner, wait for the spinner to complete and verify it failed with an
+ * error, and finally wait for the NOP request to complete and verify it
+ * succeeded without an error. The preemption timeout is also reduced, then
+ * restored, so that the test runs in a timely manner.
+ */
+static int __cancel_reset(struct drm_i915_private *i915,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq, *nop;
+ unsigned long preempt_timeout_ms;
+ int err = 0;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT ||
+ !intel_has_reset_engine(engine->gt))
+ return 0;
+
+ preempt_timeout_ms = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 100;
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ err = -ENOMEM;
+ goto out_restore;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active non-preemptable request\n",
+ engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ nop = intel_context_create_request(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ goto out_rq;
+ }
+ i915_request_get(nop);
+ i915_request_add(nop);
+
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel hung request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ goto out_nop;
+ }
+
+ if (i915_request_wait(nop, 0, HZ) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to complete nop request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_nop;
+ }
+
+ if (nop->fence.error != 0) {
+ pr_err("%s: Nop request errored (%u)\n",
+ engine->name, nop->fence.error);
+ err = -EINVAL;
+ }
+
+out_nop:
+ i915_request_put(nop);
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+out_restore:
+ engine->props.preempt_timeout_ms = preempt_timeout_ms;
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int live_cancel_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check cancellation of requests. We expect to be able to immediately
+ * cancel active requests, even if they are currently on the GPU.
+ */
+
+ for_each_uabi_engine(engine, i915) {
+ struct igt_live_test t;
+ int err, err2;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ return err;
+
+ err = __cancel_inactive(engine);
+ if (err == 0)
+ err = __cancel_active(engine);
+ if (err == 0)
+ err = __cancel_completed(engine);
+
+ err2 = igt_live_test_end(&t);
+ if (err)
+ return err;
+ if (err2)
+ return err2;
+
+ /* Expects reset so call outside of igt_live_test_* */
+ err = __cancel_reset(i915, engine);
+ if (err)
+ return err;
+
+ if (igt_flush_test(i915))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *cmd = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(obj, 0, 64);
+ i915_gem_object_unpin_map(obj);
+
+ intel_gt_chipset_flush(to_gt(i915));
+
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ if (err)
+ goto err;
+
+ /* Force the wait now to avoid including it in the benchmark */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
+ return vma;
+
+err_pin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
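+/*
+ * Note: the batch returned by empty_batch() is pinned with PIN_GLOBAL, so it
+ * lives in the GGTT; presumably that is why it is dispatched with
+ * I915_DISPATCH_SECURE below, as secure batches execute from the global GTT.
+ */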
+static struct i915_request *
+empty_request(struct intel_engine_cs *engine,
+ struct i915_vma *batch)
+{
+ struct i915_request *request;
+ int err;
+
+ request = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request))
+ return request;
+
+ err = engine->emit_bb_start(request,
+ batch->node.start,
+ batch->node.size,
+ I915_DISPATCH_SECURE);
+ if (err)
+ goto out_request;
+
+ i915_request_get(request);
+out_request:
+ i915_request_add(request);
+ return err ? ERR_PTR(err) : request;
+}
+
+static int live_empty_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct igt_live_test t;
+ struct i915_vma *batch;
+ int err = 0;
+
+ /*
+ * Submit various sized batches of empty requests, to each engine
+ * (individually), and wait for the batch to complete. We can check
+ * the overhead of submitting requests to the hardware.
+ */
+
+ batch = empty_batch(i915);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ for_each_uabi_engine(engine, i915) {
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request;
+ unsigned long n, prime;
+ ktime_t times[2] = {};
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_batch;
+
+ intel_engine_pm_get(engine);
+
+ /* Warmup / preload */
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
+ goto out_batch;
+ }
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ i915_request_put(request);
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
+ goto out_batch;
+ }
+ }
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+ i915_request_put(request);
+ intel_engine_pm_put(engine);
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out_batch;
+
+ pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ return err;
+}
+
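+/*
+ * Build a batch that jumps back to its own start, so it spins on the GPU
+ * until recursive_batch_resolve() overwrites the first dword with
+ * MI_BATCH_BUFFER_END.
+ */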
+static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ const int ver = GRAPHICS_VER(i915);
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ if (ver >= 8) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = upper_32_bits(vma->node.start);
+ } else if (ver >= 6) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *cmd++ = lower_32_bits(vma->node.start);
+ } else {
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cmd++ = lower_32_bits(vma->node.start);
+ }
+ *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+
+ __i915_gem_object_flush_map(obj, 0, 64);
+ i915_gem_object_unpin_map(obj);
+
+ intel_gt_chipset_flush(to_gt(i915));
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int recursive_batch_resolve(struct i915_vma *batch)
+{
+ u32 *cmd;
+
+ cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ *cmd = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
+ i915_gem_object_unpin_map(batch->obj);
+
+ intel_gt_chipset_flush(batch->vm->gt);
+
+ return 0;
+}
+
+static int live_all_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ struct i915_request **request;
+ struct igt_live_test t;
+ struct i915_vma *batch;
+ unsigned int idx;
+ int err;
+
+ /*
+ * Check we can submit requests to all engines simultaneously. We
+ * send a recursive batch to each engine - checking that we don't
+ * block doing so, and that they don't complete too soon.
+ */
+
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ goto out_free;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
+ goto out_free;
+ }
+
+ i915_vma_lock(batch);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ request[idx] = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
+ pr_err("%s: Request allocation failed with err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ err = i915_request_await_object(request[idx], batch->obj, 0);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[idx], 0);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+ idx++;
+ }
+
+ i915_vma_unlock(batch);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (i915_request_completed(request[idx])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+ idx++;
+ }
+
+ err = recursive_batch_resolve(batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
+ goto out_request;
+ }
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ long timeout;
+
+ timeout = i915_request_wait(request[idx], 0,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ i915_request_put(request[idx]);
+ request[idx] = NULL;
+ idx++;
+ }
+
+ err = igt_live_test_end(&t);
+
+out_request:
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (request[idx])
+ i915_request_put(request[idx]);
+ idx++;
+ }
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+out_free:
+ kfree(request);
+ return err;
+}
+
+static int live_sequential_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct i915_request **request;
+ struct i915_request *prev = NULL;
+ struct intel_engine_cs *engine;
+ struct igt_live_test t;
+ unsigned int idx;
+ int err;
+
+ /*
+ * Check we can submit requests to all engines sequentially, such
+ * that each successive request waits for the earlier ones. This
+ * tests that we don't execute requests out of order, even though
+ * they are running on independent engines.
+ */
+
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ goto out_free;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch for %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_free;
+ }
+
+ i915_vma_lock(batch);
+ request[idx] = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
+ pr_err("%s: Request allocation failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_unlock;
+ }
+
+ if (prev) {
+ err = i915_request_await_dma_fence(request[idx],
+ &prev->fence);
+ if (err) {
+ i915_request_add(request[idx]);
+ pr_err("%s: Request await failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_unlock;
+ }
+ }
+
+ err = i915_request_await_object(request[idx],
+ batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[idx], 0);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[idx],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[idx]->batch = batch;
+
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+
+ prev = request[idx];
+ idx++;
+
+out_unlock:
+ i915_vma_unlock(batch);
+ if (err)
+ goto out_request;
+ }
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ long timeout;
+
+ if (i915_request_completed(request[idx])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+
+ err = recursive_batch_resolve(request[idx]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ timeout = i915_request_wait(request[idx], 0,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ idx++;
+ }
+
+ err = igt_live_test_end(&t);
+
+out_request:
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ u32 *cmd;
+
+ if (!request[idx])
+ break;
+
+ cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj,
+ I915_MAP_WC);
+ if (!IS_ERR(cmd)) {
+ *cmd = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(request[idx]->batch->obj,
+ 0, sizeof(*cmd));
+ i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+ intel_gt_chipset_flush(engine->gt);
+ }
+
+ i915_vma_put(request[idx]->batch);
+ i915_request_put(request[idx]);
+ idx++;
+ }
+out_free:
+ kfree(request);
+ return err;
+}
+
+struct parallel_thread {
+ struct kthread_worker *worker;
+ struct kthread_work work;
+ struct intel_engine_cs *engine;
+ int result;
+};
+
+static void __live_parallel_engine1(struct kthread_work *work)
+{
+ struct parallel_thread *thread =
+ container_of(work, typeof(*thread), work);
+ struct intel_engine_cs *engine = thread->engine;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+
+ count = 0;
+ intel_engine_pm_get(engine);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
+
+ pr_info("%s: %lu request + sync\n", engine->name, count);
+ thread->result = err;
+}
+
+static void __live_parallel_engineN(struct kthread_work *work)
+{
+ struct parallel_thread *thread =
+ container_of(work, typeof(*thread), work);
+ struct intel_engine_cs *engine = thread->engine;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+
+ count = 0;
+ intel_engine_pm_get(engine);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
+
+ pr_info("%s: %lu requests\n", engine->name, count);
+ thread->result = err;
+}
+
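+/*
+ * i915->selftest.counter is primed with the number of engines by the caller;
+ * each spinner calls wake_all() exactly once, and the last caller releases
+ * everyone blocked in wait_for_all().
+ */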
+static bool wake_all(struct drm_i915_private *i915)
+{
+ if (atomic_dec_and_test(&i915->selftest.counter)) {
+ wake_up_var(&i915->selftest.counter);
+ return true;
+ }
+
+ return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+ if (wake_all(i915))
+ return 0;
+
+ if (wait_var_event_timeout(&i915->selftest.counter,
+ !atomic_read(&i915->selftest.counter),
+ i915_selftest.timeout_jiffies))
+ return 0;
+
+ return -ETIME;
+}
+
+static void __live_parallel_spin(struct kthread_work *work)
+{
+ struct parallel_thread *thread =
+ container_of(work, typeof(*thread), work);
+ struct intel_engine_cs *engine = thread->engine;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ /*
+ * Create a spinner running for eternity on each engine. If a second
+ * spinner is incorrectly placed on the same engine, it will not be
+ * able to start in time.
+ */
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ wake_all(engine->i915);
+ thread->result = -ENOMEM;
+ return;
+ }
+
+ intel_engine_pm_get(engine);
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP); /* no preemption */
+ intel_engine_pm_put(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ if (err == -ENODEV)
+ err = 0;
+ wake_all(engine->i915);
+ goto out_spin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin, rq)) {
+ /* Occupy this engine for the whole test */
+ err = wait_for_all(engine->i915);
+ } else {
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ err = -EINVAL;
+ }
+ igt_spinner_end(&spin);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_fini(&spin);
+ thread->result = err;
+}
+
+static int live_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static void (* const func[])(struct kthread_work *) = {
+ __live_parallel_engine1,
+ __live_parallel_engineN,
+ __live_parallel_spin,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct parallel_thread *threads;
+ struct intel_engine_cs *engine;
+ void (* const *fn)(struct kthread_work *);
+ int err = 0;
+
+ /*
+ * Check we can submit requests to all engines concurrently. This
+ * tests that we load up the system maximally.
+ */
+
+ threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ for (fn = func; !err && *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker(0, "igt/parallel:%s",
+ engine->name);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ break;
+ }
+
+ threads[idx].worker = worker;
+ threads[idx].result = 0;
+ threads[idx].engine = engine;
+
+ kthread_init_work(&threads[idx].work, *fn);
+ kthread_queue_work(worker, &threads[idx].work);
+ idx++;
+ }
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (!threads[idx].worker)
+ break;
+
+ kthread_flush_work(&threads[idx].work);
+ status = READ_ONCE(threads[idx].result);
+ if (status && !err)
+ err = status;
+
+ kthread_destroy_worker(threads[idx++].worker);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+ kfree(threads);
+ return err;
+}
+
+static int
+max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ int ret;
+
+ /*
+ * Before execlists, all contexts share the same ringbuffer. With
+ * execlists, each context/engine has a separate ringbuffer and
+ * for the purposes of this test, inexhaustible.
+ *
+ * For the global ringbuffer though, we have to be very careful
+ * that we do not wrap while preventing the execution of requests
+ * with an unsignaled fence.
+ */
+ if (HAS_EXECLISTS(ctx->i915))
+ return INT_MAX;
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ } else {
+ int sz;
+
+ ret = rq->ring->size - rq->reserved_space;
+ i915_request_add(rq);
+
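+ /* Size of a single empty request, accounting for ring wraparound */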
+ sz = rq->ring->emit - rq->head;
+ if (sz < 0)
+ sz += rq->ring->size;
+ ret /= sz;
+ ret /= 2; /* leave half spare, in case of emergency! */
+ }
+
+ return ret;
+}
+
+static int live_breadcrumbs_smoketest(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
+ const unsigned int ncpus = num_online_cpus();
+ unsigned long num_waits, num_fences;
+ struct intel_engine_cs *engine;
+ struct smoke_thread *threads;
+ struct igt_live_test live;
+ intel_wakeref_t wakeref;
+ struct smoketest *smoke;
+ unsigned int n, idx;
+ struct file *file;
+ int ret = 0;
+
+ /*
+ * Smoketest our breadcrumb/signal handling for requests across multiple
+ * threads. A very simple test to catch only the most egregious of bugs.
+ * See __igt_breadcrumbs_smoketest().
+ *
+ * On real hardware this time.
+ */
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ file = mock_file(i915);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+ if (!smoke) {
+ ret = -ENOMEM;
+ goto out_file;
+ }
+
+ threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_smoke;
+ }
+
+ smoke[0].request_alloc = __live_request_alloc;
+ smoke[0].ncontexts = 64;
+ smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+ sizeof(*smoke[0].contexts),
+ GFP_KERNEL);
+ if (!smoke[0].contexts) {
+ ret = -ENOMEM;
+ goto out_threads;
+ }
+
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+ if (IS_ERR(smoke[0].contexts[n])) {
+ ret = PTR_ERR(smoke[0].contexts[n]);
+ goto out_contexts;
+ }
+ }
+
+ ret = igt_live_test_begin(&live, i915, __func__, "");
+ if (ret)
+ goto out_contexts;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ smoke[idx] = smoke[0];
+ smoke[idx].engine = engine;
+ smoke[idx].max_batch =
+ max_batches(smoke[0].contexts[0], engine);
+ if (smoke[idx].max_batch < 0) {
+ ret = smoke[idx].max_batch;
+ goto out_flush;
+ }
+ /* One ring interleaved between requests from all cpus */
+ smoke[idx].max_batch /= num_online_cpus() + 1;
+ pr_debug("Limiting batches to %d requests on %s\n",
+ smoke[idx].max_batch, engine->name);
+
+ for (n = 0; n < ncpus; n++) {
+ unsigned int i = idx * ncpus + n;
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+ if (IS_ERR(worker)) {
+ ret = PTR_ERR(worker);
+ goto out_flush;
+ }
+
+ threads[i].worker = worker;
+ threads[i].t = &smoke[idx];
+
+ kthread_init_work(&threads[i].work,
+ __igt_breadcrumbs_smoketest);
+ kthread_queue_work(worker, &threads[i].work);
+ }
+
+ idx++;
+ }
+
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+out_flush:
+ idx = 0;
+ num_waits = 0;
+ num_fences = 0;
+ for_each_uabi_engine(engine, i915) {
+ for (n = 0; n < ncpus; n++) {
+ unsigned int i = idx * ncpus + n;
+ int err;
+
+ if (!threads[i].worker)
+ continue;
+
+ WRITE_ONCE(threads[i].stop, true);
+ kthread_flush_work(&threads[i].work);
+ err = READ_ONCE(threads[i].result);
+ if (err < 0 && !ret)
+ ret = err;
+
+ kthread_destroy_worker(threads[i].worker);
+ }
+
+ num_waits += atomic_long_read(&smoke[idx].num_waits);
+ num_fences += atomic_long_read(&smoke[idx].num_fences);
+ idx++;
+ }
+ pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
+ num_waits, num_fences, idx, ncpus);
+
+ ret = igt_live_test_end(&live) ?: ret;
+out_contexts:
+ kfree(smoke[0].contexts);
+out_threads:
+ kfree(threads);
+out_smoke:
+ kfree(smoke);
+out_file:
+ fput(file);
+out_rpm:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return ret;
+}
+
+int i915_request_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_nop_request),
+ SUBTEST(live_all_engines),
+ SUBTEST(live_sequential_engines),
+ SUBTEST(live_parallel_engines),
+ SUBTEST(live_empty_request),
+ SUBTEST(live_cancel_request),
+ SUBTEST(live_breadcrumbs_smoketest),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
+
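+/*
+ * Flush ce's timeline: submit a kernel-context request ordered after ce's
+ * last request, wait for it, then flush submission until the engine idles.
+ * Any error already carried in @err is preserved.
+ */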
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ while (!err && !intel_engine_is_idle(ce->engine))
+ intel_engine_flush_submission(ce->engine);
+
+ return err;
+}
+
+struct perf_stats {
+ struct intel_engine_cs *engine;
+ unsigned long count;
+ ktime_t time;
+ ktime_t busy;
+ u64 runtime;
+};
+
+struct perf_series {
+ struct drm_i915_private *i915;
+ unsigned int nengines;
+ struct intel_context *ce[];
+};
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
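+/*
+ * trifilter() sorts five samples, drops the minimum and maximum, and returns
+ * a[1] + 2 * a[2] + a[3], i.e. roughly 4x the median. TF_BIAS = 2 accounts
+ * for that implicit x4 scaling when the result is later shifted or divided
+ * by 1 << TF_BIAS.
+ */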
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+#define TF_COUNT 5
+ sort(a, TF_COUNT, sizeof(*a), cmp_u32, NULL);
+
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ GEM_BUG_ON(sum > U32_MAX);
+ return sum;
+#define TF_BIAS 2
+}
+
+static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
+{
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+
+ return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
+}
+
+static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
+{
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_store_dw(u32 *cs, u32 offset, u32 value)
+{
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
+
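+/*
+ * MI_SEMAPHORE_WAIT in polling mode: the CS repeatedly compares the dword at
+ * @offset against @value using the comparison encoded in @mode.
+ */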
+static u32 *emit_semaphore_poll(u32 *cs, u32 mode, u32 value, u32 offset)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ mode;
+ *cs++ = value;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *emit_semaphore_poll_until(u32 *cs, u32 offset, u32 value)
+{
+ return emit_semaphore_poll(cs, MI_SEMAPHORE_SAD_EQ_SDD, value, offset);
+}
+
+static void semaphore_set(u32 *sema, u32 value)
+{
+ WRITE_ONCE(*sema, value);
+ wmb(); /* flush the update to the cache, and beyond */
+}
+
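+/*
+ * Scratch area for the measurements: 21 dwords inside the engine's status
+ * page, zeroed before each test. 21 covers the largest consumer,
+ * measure_context_switch(), which writes indices up to 2 * (TF_COUNT + 1) + 1.
+ */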
+static u32 *hwsp_scratch(const struct intel_context *ce)
+{
+ return memset32(ce->engine->status_page.addr + 1000, 0, 21);
+}
+
+static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
+{
+ return (i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(dw));
+}
+
+static int measure_semaphore_response(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how many cycles it takes for the HW to detect the change
+ * in a semaphore value.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * poke semaphore
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Semaphore latency: B - A
+ */
+
+ semaphore_set(sema, -1);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
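+ /* 4 dwords for the initial store, then 12 per sample: poll + timestamp + reset */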
+ cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset, 0);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+ cs = emit_store_dw(cs, offset, 0);
+ }
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ preempt_disable();
+ cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ semaphore_set(sema, i);
+ preempt_enable();
+
+ if (wait_for(READ_ONCE(*sema) == 0, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ elapsed[i - 1] = sema[i] - cycles;
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: semaphore response %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_idle_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is idle, but is resting in our context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ return err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++)
+ elapsed[i] = sema[i] - elapsed[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: idle dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_busy_dispatch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for us to submit a request while the
+ * engine is busy, polling on a semaphore in our context. With
+ * direct submission, this will include the cost of a lite restore.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Submission latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ if (i > 1 && wait_for(READ_ONCE(sema[i - 1]), 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ preempt_disable();
+ local_bh_disable();
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ local_bh_enable();
+ semaphore_set(sema, i - 1);
+ preempt_enable();
+ }
+
+ wait_for(READ_ONCE(sema[i - 1]), 500);
+ semaphore_set(sema, i - 1);
+
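+ /*
+ * elapsed[i] was sampled just before submitting request i + 1 and
+ * releasing request i, so sema[i] - elapsed[i] spans the busy
+ * submission (including any lite restore) of request i.
+ */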
+ for (i = 1; i <= TF_COUNT; i++) {
+ GEM_BUG_ON(sema[i] == -1);
+ elapsed[i - 1] = sema[i] - elapsed[i];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: busy dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int plug(struct intel_engine_cs *engine, u32 *sema, u32 mode, int value)
+{
+ const u32 offset =
+ i915_ggtt_offset(engine->status_page.vma) +
+ offset_in_page(sema);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ cs = emit_semaphore_poll(cs, mode, value, offset);
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int measure_inter_request(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT + 1], cycles;
+ struct i915_sw_fence *submit;
+ int i, err;
+
+ /*
+ * Measure how long it takes to advance from one request into the
+ * next. Between each request we flush the GPU caches to memory,
+ * update the breadcrumbs, and then invalidate those caches.
+ * We queue up all the requests to be submitted in one batch so
+ * it should be one set of contiguous measurements.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * advance request
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Request latency: B - A
+ */
+
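+ /*
+ * Plug the engine on its kernel context first, so the requests built
+ * below pile up behind the semaphore and then execute back-to-back
+ * once it is released.
+ */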
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ semaphore_set(sema, 1);
+ return -ENOMEM;
+ }
+
+ intel_engine_flush_submission(ce->engine);
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_submit;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+ if (err < 0) {
+ i915_request_add(rq);
+ goto err_submit;
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_submit;
+ }
+
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+ }
+ i915_sw_fence_commit(submit);
+ intel_engine_flush_submission(ce->engine);
+ heap_fence_put(submit);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[i + 1] - sema[i];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: inter-request latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_submit:
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_context_switch(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ struct i915_request *fence = NULL;
+ u32 elapsed[TF_COUNT + 1], cycles;
+ int i, j, err;
+ u32 *cs;
+
+ /*
+ * Measure how long it takes to advance from one request in one
+ * context to a request in another context. This allows us to
+ * measure how long the context save/restore take, along with all
+ * the inter-context setup we require.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * switch context
+ * B: read CS_TIMESTAMP on GPU
+ *
+ * Context switch latency: B - A
+ */
+
+ err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
+ if (err)
+ return err;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct intel_context *arr[] = {
+ ce, ce->engine->kernel_context
+ };
+ u32 addr = offset + ARRAY_SIZE(arr) * i * sizeof(u32);
+
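+ /* Alternate ce <-> kernel_context; adjacent timestamp pairs bracket one switch */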
+ for (j = 0; j < ARRAY_SIZE(arr); j++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(arr[j]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fence;
+ }
+
+ if (fence) {
+ err = i915_request_await_dma_fence(rq,
+ &fence->fence);
+ if (err) {
+ i915_request_add(rq);
+ goto err_fence;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err_fence;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ addr += sizeof(u32);
+
+ intel_ring_advance(rq, cs);
+
+ i915_request_put(fence);
+ fence = i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+ }
+ i915_request_put(fence);
+ intel_engine_flush_submission(ce->engine);
+
+ semaphore_set(sema, 1);
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 2] - sema[2 * i + 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: context switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err_fence:
+ i915_request_put(fence);
+ semaphore_set(sema, 1);
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static int measure_preemption(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * We measure two latencies while triggering preemption. The first
+ * latency is how long it takes for us to submit a preempting request.
+ * The second latency is how it takes for us to return from the
+ * preemption back to the original context.
+ *
+ * A: read CS_TIMESTAMP from CPU
+ * submit preemption
+ * B: read CS_TIMESTAMP on GPU (in preempting context)
+ * context switch
+ * C: read CS_TIMESTAMP on GPU (in original context)
+ *
+ * Preemption dispatch latency: B - A
+ * Preemption switch latency: C - B
+ */
+
+ if (!intel_engine_has_preemption(ce->engine))
+ return 0;
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ u32 addr = offset + 2 * i * sizeof(u32);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, addr, -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, addr + sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ if (wait_for(READ_ONCE(sema[2 * i]) == -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_timestamp_store(cs, ce, addr);
+ cs = emit_store_dw(cs, offset, i);
+
+ intel_ring_advance(rq, cs);
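+ /* Elevate to barrier priority so submission immediately preempts the poller */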
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ i915_request_add(rq);
+ }
+
+ if (wait_for(READ_ONCE(sema[2 * i - 2]) != -1, 500)) {
+ err = -EIO;
+ goto err;
+ }
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 0] - elapsed[i - 1];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption dispatch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ for (i = 1; i <= TF_COUNT; i++)
+ elapsed[i - 1] = sema[2 * i + 1] - sema[2 * i + 0];
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: preemption switch latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+struct signal_cb {
+ struct dma_fence_cb base;
+ bool seen;
+};
+
+static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct signal_cb *s = container_of(cb, typeof(*s), base);
+
+ smp_store_mb(s->seen, true); /* be safe, be strong */
+}
+
+static int measure_completion(struct intel_context *ce)
+{
+ u32 *sema = hwsp_scratch(ce);
+ const u32 offset = hwsp_offset(ce, sema);
+ u32 elapsed[TF_COUNT], cycles;
+ u32 *cs;
+ int err;
+ int i;
+
+ /*
+ * Measure how long it takes for the signal (interrupt) to be
+ * sent from the GPU to be processed by the CPU.
+ *
+ * A: read CS_TIMESTAMP on GPU
+ * signal
+ * B: read CS_TIMESTAMP from CPU
+ *
+ * Completion latency: B - A
+ */
+
+ for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
+ struct signal_cb cb = { .seen = false };
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ cs = emit_store_dw(cs, offset + i * sizeof(u32), -1);
+ cs = emit_semaphore_poll_until(cs, offset, i);
+ cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
+
+ intel_ring_advance(rq, cs);
+
+ dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(ce->engine);
+ if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
+ err = -EIO;
+ goto err;
+ }
+
+ preempt_disable();
+ semaphore_set(sema, i);
+ while (!READ_ONCE(cb.seen))
+ cpu_relax();
+
+ elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
+ preempt_enable();
+ }
+
+ err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
+ if (err)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(elapsed); i++) {
+ GEM_BUG_ON(sema[i + 1] == -1);
+ elapsed[i] = elapsed[i] - sema[i + 1];
+ }
+
+ cycles = trifilter(elapsed);
+ pr_info("%s: completion latency %d cycles, %lluns\n",
+ ce->engine->name, cycles >> TF_BIAS,
+ cycles_to_ns(ce->engine, cycles));
+
+ return intel_gt_wait_for_idle(ce->engine->gt, HZ);
+
+err:
+ intel_gt_set_wedged(ce->engine->gt);
+ return err;
+}
+
+static void rps_pin(struct intel_gt *gt)
+{
+ /* Pin the frequency to max */
+ atomic_inc(&gt->rps.num_waiters);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ mutex_lock(&gt->rps.lock);
+ intel_rps_set(&gt->rps, gt->rps.max_freq);
+ mutex_unlock(&gt->rps.lock);
+}
+
+static void rps_unpin(struct intel_gt *gt)
+{
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ atomic_dec(&gt->rps.num_waiters);
+}
+
+static int perf_request_latency(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ int err = 0;
+
+ if (GRAPHICS_VER(i915) < 8) /* per-engine CS timestamp, semaphores */
+ return 0;
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ st_engine_heartbeat_disable(engine);
+ rps_pin(engine->gt);
+
+ if (err == 0)
+ err = measure_semaphore_response(ce);
+ if (err == 0)
+ err = measure_idle_dispatch(ce);
+ if (err == 0)
+ err = measure_busy_dispatch(ce);
+ if (err == 0)
+ err = measure_inter_request(ce);
+ if (err == 0)
+ err = measure_context_switch(ce);
+ if (err == 0)
+ err = measure_preemption(ce);
+ if (err == 0)
+ err = measure_completion(ce);
+
+ rps_unpin(engine->gt);
+ st_engine_heartbeat_enable(engine);
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ if (err)
+ goto out;
+ }
+
+out:
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ cpu_latency_qos_remove_request(&qos);
+ return err;
+}
+
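+/*
+ * Three request-series workloads, each cycling round-robin over all engines:
+ * s_sync0 waits for every request before issuing the next, s_sync1 keeps one
+ * request in flight, and s_many never waits.
+ */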
+static int s_sync0(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
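+/* Keep one request in flight: wait on the previous submission each cycle. */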
+static int s_sync1(void *arg)
+{
+ struct perf_series *ps = arg;
+ struct i915_request *prev = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+ int err = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (prev && i915_request_wait(prev, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ return err;
+}
+
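+/* Submit requests round-robin across engines without ever waiting. */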
+static int s_many(void *arg)
+{
+ struct perf_series *ps = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned int idx = 0;
+
+ GEM_BUG_ON(!ps->nengines);
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ps->ce[idx]);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+
+ if (++idx == ps->nengines)
+ idx = 0;
+ } while (!__igt_timeout(end_time, NULL));
+
+ return 0;
+}
+
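+/*
+ * Drive each of the s_* workloads above from a single thread across
+ * every engine, reporting per-engine busyness, runtime and walltime.
+ */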
+static int perf_series_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ s_sync0,
+ s_sync1,
+ s_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct pm_qos_request qos;
+ struct perf_stats *stats;
+ struct perf_series *ps;
+ unsigned int idx;
+ int err = 0;
+
+ stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
+ if (!ps) {
+ kfree(stats);
+ return -ENOMEM;
+ }
+
+ cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
+
+ ps->i915 = i915;
+ ps->nengines = nengines;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ ps->ce[idx++] = ce;
+ }
+ GEM_BUG_ON(idx != ps->nengines);
+
+ for (fn = func; *fn && !err; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p =
+ memset(&stats[idx], 0, sizeof(stats[idx]));
+ struct intel_context *ce = ps->ce[idx];
+
+ p->engine = ps->ce[idx]->engine;
+ intel_engine_pm_get(p->engine);
+
+ if (intel_engine_supports_stats(p->engine))
+ p->busy = intel_engine_get_busy_time(p->engine,
+ &p->time) + 1;
+ else
+ p->time = ktime_get();
+ p->runtime = -intel_context_get_total_runtime_ns(ce);
+ }
+
+ err = (*fn)(ps);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+
+ for (idx = 0; idx < nengines; idx++) {
+ struct perf_stats *p = &stats[idx];
+ struct intel_context *ce = ps->ce[idx];
+ int integer, decimal;
+ u64 busy, dt, now;
+
+ if (p->busy)
+ p->busy = ktime_sub(intel_engine_get_busy_time(p->engine,
+ &now),
+ p->busy - 1);
+ else
+ now = ktime_get();
+ p->time = ktime_sub(now, p->time);
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime += intel_context_get_total_runtime_ns(ce);
+ intel_engine_pm_put(p->engine);
+
+ busy = 100 * ktime_to_ns(p->busy);
+ dt = ktime_to_ns(p->time);
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, p->engine->name, ce->timeline->seqno,
+ integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ }
+ }
+
+out:
+ for (idx = 0; idx < nengines; idx++) {
+ if (IS_ERR_OR_NULL(ps->ce[idx]))
+ break;
+
+ intel_context_unpin(ps->ce[idx]);
+ intel_context_put(ps->ce[idx]);
+ }
+ kfree(ps);
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(stats);
+ return err;
+}
+
+struct p_thread {
+ struct perf_stats p;
+ struct kthread_worker *worker;
+ struct kthread_work work;
+ struct intel_engine_cs *engine;
+ int result;
+};
+
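+/* Per-engine worker: measure throughput of fully synchronous requests. */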
+static void p_sync0(struct kthread_work *work)
+{
+ struct p_thread *thread = container_of(work, typeof(*thread), work);
+ struct perf_stats *p = &thread->p;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ thread->result = PTR_ERR(ce);
+ return;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ thread->result = err;
+ return;
+ }
+
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
+ busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
+ }
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ if (busy) {
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
+ p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ thread->result = err;
+}
+
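+/* Per-engine worker: measure throughput with one request always in flight. */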
+static void p_sync1(struct kthread_work *work)
+{
+ struct p_thread *thread = container_of(work, typeof(*thread), work);
+ struct perf_stats *p = &thread->p;
+ struct intel_engine_cs *engine = p->engine;
+ struct i915_request *prev = NULL;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ bool busy;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ thread->result = PTR_ERR(ce);
+ return;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ thread->result = err;
+ return;
+ }
+
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
+ busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
+ }
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (prev && i915_request_wait(prev, 0, HZ) < 0)
+ err = -ETIME;
+ i915_request_put(prev);
+ prev = rq;
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(prev);
+
+ if (busy) {
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
+ p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ thread->result = err;
+}
+
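+/* Per-engine worker: measure how many requests can be submitted unthrottled. */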
+static void p_many(struct kthread_work *work)
+{
+ struct p_thread *thread = container_of(work, typeof(*thread), work);
+ struct perf_stats *p = &thread->p;
+ struct intel_engine_cs *engine = p->engine;
+ struct intel_context *ce;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int err = 0;
+ bool busy;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ thread->result = PTR_ERR(ce);
+ return;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ thread->result = err;
+ return;
+ }
+
+ if (intel_engine_supports_stats(engine)) {
+ p->busy = intel_engine_get_busy_time(engine, &p->time);
+ busy = true;
+ } else {
+ p->time = ktime_get();
+ busy = false;
+ }
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ if (busy) {
+ ktime_t now;
+
+ p->busy = ktime_sub(intel_engine_get_busy_time(engine, &now),
+ p->busy);
+ p->time = ktime_sub(now, p->time);
+ } else {
+ p->time = ktime_sub(ktime_get(), p->time);
+ }
+
+ err = switch_to_kernel_sync(ce, err);
+ p->runtime = intel_context_get_total_runtime_ns(ce);
+ p->count = count;
+
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ thread->result = err;
+}
+
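+/*
+ * Run each of the p_* workloads above concurrently on every engine,
+ * one kthread worker per engine.
+ */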
+static int perf_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static void (* const func[])(struct kthread_work *) = {
+ p_sync0,
+ p_sync1,
+ p_many,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ void (* const *fn)(struct kthread_work *);
+ struct intel_engine_cs *engine;
+ struct pm_qos_request qos;
+ struct p_thread *engines;
+ int err = 0;
+
+ engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ cpu_latency_qos_add_request(&qos, 0);
+
+ for (fn = func; *fn; fn++) {
+ char name[KSYM_NAME_LEN];
+ struct igt_live_test t;
+ unsigned int idx;
+
+ snprintf(name, sizeof(name), "%ps", *fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
+ if (err)
+ break;
+
+ atomic_set(&i915->selftest.counter, nengines);
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct kthread_worker *worker;
+
+ intel_engine_pm_get(engine);
+
+ memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+
+ worker = kthread_create_worker(0, "igt:%s",
+ engine->name);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ intel_engine_pm_put(engine);
+ break;
+ }
+ engines[idx].worker = worker;
+ engines[idx].result = 0;
+ engines[idx].p.engine = engine;
+ engines[idx].engine = engine;
+
+ kthread_init_work(&engines[idx].work, *fn);
+ kthread_queue_work(worker, &engines[idx].work);
+ idx++;
+ }
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (!engines[idx].worker)
+ break;
+
+ kthread_flush_work(&engines[idx].work);
+ status = READ_ONCE(engines[idx].result);
+ if (status && !err)
+ err = status;
+
+ intel_engine_pm_put(engine);
+
+ kthread_destroy_worker(engines[idx].worker);
+ idx++;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ struct perf_stats *p = &engines[idx].p;
+ u64 busy = 100 * ktime_to_ns(p->busy);
+ u64 dt = ktime_to_ns(p->time);
+ int integer, decimal;
+
+ if (dt) {
+ integer = div64_u64(busy, dt);
+ busy -= integer * dt;
+ decimal = div64_u64(100 * busy, dt);
+ } else {
+ integer = 0;
+ decimal = 0;
+ }
+
+ GEM_BUG_ON(engine != p->engine);
+ pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+ name, engine->name, p->count, integer, decimal,
+ div_u64(p->runtime, 1000 * 1000),
+ div_u64(ktime_to_ns(p->time), 1000 * 1000));
+ idx++;
+ }
+ }
+
+ cpu_latency_qos_remove_request(&qos);
+ kfree(engines);
+ return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_request_latency),
+ SUBTEST(perf_series_engines),
+ SUBTEST(perf_parallel_engines),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
new file mode 100644
index 000000000..39da0fb0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/random.h>
+
+#include "gt/intel_gt_pm.h"
+#include "i915_driver.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+
+struct i915_selftest i915_selftest __read_mostly = {
+ .timeout_ms = 500,
+};
+
+int i915_mock_sanitycheck(void)
+{
+ pr_info(DRIVER_NAME ": %s() - ok!\n", __func__);
+ return 0;
+}
+
+int i915_live_sanitycheck(struct drm_i915_private *i915)
+{
+ pr_info("%s: %s() - ok!\n", i915->drm.driver->name, __func__);
+ return 0;
+}
+
+enum {
+#define selftest(name, func) mock_##name,
+#include "i915_mock_selftests.h"
+#undef selftest
+};
+
+enum {
+#define selftest(name, func) live_##name,
+#include "i915_live_selftests.h"
+#undef selftest
+};
+
+enum {
+#define selftest(name, func) perf_##name,
+#include "i915_perf_selftests.h"
+#undef selftest
+};
+
+struct selftest {
+ bool enabled;
+ const char *name;
+ union {
+ int (*mock)(void);
+ int (*live)(struct drm_i915_private *);
+ };
+};
+
+#define selftest(n, f) [mock_##n] = { .name = #n, { .mock = f } },
+static struct selftest mock_selftests[] = {
+#include "i915_mock_selftests.h"
+};
+#undef selftest
+
+#define selftest(n, f) [live_##n] = { .name = #n, { .live = f } },
+static struct selftest live_selftests[] = {
+#include "i915_live_selftests.h"
+};
+#undef selftest
+
+#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } },
+static struct selftest perf_selftests[] = {
+#include "i915_perf_selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
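+/*
+ * e.g. a selftest declared with selftest(sanitycheck, ...) on line 10
+ * of i915_mock_selftests.h (line number purely illustrative) becomes
+ * the module parameter "igt__10__mock_sanitycheck".
+ */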
+#define selftest(n, func) selftest_0(n, func, param(n))
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, mock_selftests[mock_##n].enabled, bool, 0400);
+#include "i915_mock_selftests.h"
+#undef selftest_0
+#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __live_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
+#include "i915_live_selftests.h"
+#undef selftest_0
+#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400);
+#include "i915_perf_selftests.h"
+#undef selftest_0
+#undef param
+#undef selftest
+
+static void set_default_test_all(struct selftest *st, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int __run_selftests(const char *name,
+ struct selftest *st,
+ unsigned int count,
+ void *data)
+{
+ int err = 0;
+
+ while (!i915_selftest.random_seed)
+ i915_selftest.random_seed = get_random_u32();
+
+ i915_selftest.timeout_jiffies =
+ i915_selftest.timeout_ms ?
+ msecs_to_jiffies_timeout(i915_selftest.timeout_ms) :
+ MAX_SCHEDULE_TIMEOUT;
+
+ set_default_test_all(st, count);
+
+ pr_info(DRIVER_NAME ": Performing %s selftests with st_random_seed=0x%x st_timeout=%u\n",
+ name, i915_selftest.random_seed, i915_selftest.timeout_ms);
+
+ /* Tests are listed in order in i915_*_selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ pr_info(DRIVER_NAME ": Running %s\n", st->name);
+ if (data)
+ err = st->live(data);
+ else
+ err = st->mock();
+ if (err == -EINTR && !signal_pending(current))
+ err = 0;
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+#define run_selftests(x, data) \
+ __run_selftests(#x, x##_selftests, ARRAY_SIZE(x##_selftests), data)
+
+int i915_mock_selftests(void)
+{
+ int err;
+
+ if (!i915_selftest.mock)
+ return 0;
+
+ err = run_selftests(mock, NULL);
+ if (err) {
+ i915_selftest.mock = err;
+ return 1;
+ }
+
+ if (i915_selftest.mock < 0) {
+ i915_selftest.mock = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int i915_live_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.live)
+ return 0;
+
+ err = run_selftests(live, pdev_to_i915(pdev));
+ if (err) {
+ i915_selftest.live = err;
+ return err;
+ }
+
+ if (i915_selftest.live < 0) {
+ i915_selftest.live = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int i915_perf_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.perf)
+ return 0;
+
+ err = run_selftests(perf, pdev_to_i915(pdev));
+ if (err) {
+ i915_selftest.perf = err;
+ return err;
+ }
+
+ if (i915_selftest.perf < 0) {
+ i915_selftest.perf = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
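+/*
+ * st_filter is a comma-separated list of "[!]<selftest>/<subtest>" (or
+ * bare "[!]<subtest>") tokens; a leading '!' excludes the match.
+ */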
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+ char *filter, *sep, *tok;
+ bool result = true;
+
+ filter = kstrdup(i915_selftest.filter, GFP_KERNEL);
+ for (sep = filter; (tok = strsep(&sep, ","));) {
+ bool allow = true;
+ char *sl;
+
+ if (*tok == '!') {
+ allow = false;
+ tok++;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ sl = strchr(tok, '/');
+ if (sl) {
+ *sl++ = '\0';
+ if (strcmp(tok, caller)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+ tok = sl;
+ }
+
+ if (strcmp(tok, name)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+
+ result = allow;
+ break;
+ }
+ kfree(filter);
+
+ return result;
+}
+
+int __i915_nop_setup(void *data)
+{
+ return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+ return err;
+}
+
+int __i915_live_setup(void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(to_gt(i915)))
+ return -EIO;
+
+ return intel_gt_terminally_wedged(to_gt(i915));
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ i915_gem_drain_freed_objects(i915);
+
+ return err;
+}
+
+int __intel_gt_live_setup(void *data)
+{
+ struct intel_gt *gt = data;
+
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(gt))
+ return -EIO;
+
+ return intel_gt_terminally_wedged(gt);
+}
+
+int __intel_gt_live_teardown(int err, void *data)
+{
+ struct intel_gt *gt = data;
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_gem_drain_freed_objects(gt->i915);
+
+ return err;
+}
+
+int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
+ const struct i915_subtest *st,
+ unsigned int count,
+ void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (!apply_subtest_filter(caller, st->name))
+ continue;
+
+ err = setup(data);
+ if (err) {
+ pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+ caller, st->name);
+ return err;
+ }
+
+ pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ GEM_TRACE("Running %s/%s\n", caller, st->name);
+
+ err = teardown(st->func(data), data);
+ if (err && err != -EINTR) {
+ pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+void igt_hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
+module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
+module_param_named(st_filter, i915_selftest.filter, charp, 0400);
+
+module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
+MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then leave dummy module)");
+
+module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
+MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
+
+module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
+MODULE_PARM_DESC(perf_selftests, "Run performance orientated selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
new file mode 100644
index 000000000..daa985e5a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+static int
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ /* Leave the fence for the caller to free it after testing */
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_sw_fence *alloc_fence(void)
+{
+ struct i915_sw_fence *fence;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ i915_sw_fence_init(fence, fence_notify);
+ return fence;
+}
+
+static void free_fence(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_fini(fence);
+ kfree(fence);
+}
+
+static int __test_self(struct i915_sw_fence *fence)
+{
+ if (i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ i915_sw_fence_commit(fence);
+ if (!i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ i915_sw_fence_wait(fence);
+ if (!i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int test_self(void *arg)
+{
+ struct i915_sw_fence *fence;
+ int ret;
+
+	/* Test i915_sw_fence signaling and completion */
+ fence = alloc_fence();
+ if (!fence)
+ return -ENOMEM;
+
+ ret = __test_self(fence);
+
+ free_fence(fence);
+ return ret;
+}
+
+static int test_dag(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret = -EINVAL;
+
+ /* Test detection of cycles within the i915_sw_fence graphs */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
+ return 0;
+
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("recursive cycle not detected (AA)\n");
+ goto err_A;
+ }
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("single depth cycle not detected (BAB)\n");
+ goto err_B;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
+ pr_err("invalid cycle detected\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
+ pr_err("single depth cycle not detected (CBC)\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("cycle not detected (BA, CB, AC)\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
+ pr_err("invalid cycle detected\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+ i915_sw_fence_commit(C);
+
+ ret = 0;
+ if (!i915_sw_fence_done(C)) {
+ pr_err("fence C not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(B)) {
+ pr_err("fence B not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(A)) {
+ pr_err("fence A not done\n");
+ ret = -EINVAL;
+ }
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_AB(void *arg)
+{
+ struct i915_sw_fence *A, *B;
+ int ret;
+
+ /* Test i915_sw_fence (A) waiting on an event source (B) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_B;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence A was complete before await\n");
+ ret = -EINVAL;
+ goto err_B;
+ }
+
+ ret = -EINVAL;
+ i915_sw_fence_commit(A);
+ if (i915_sw_fence_done(A))
+ goto err_B;
+
+ i915_sw_fence_commit(B);
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B is not done\n");
+ goto err_B;
+ }
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A is not done\n");
+ goto err_B;
+ }
+
+ ret = 0;
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_ABC(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret;
+
+ /* Test a chain of fences, A waits on B who waits on C */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence B was complete before await\n");
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence C was complete before await\n");
+ goto err_C;
+ }
+
+ ret = -EINVAL;
+ i915_sw_fence_commit(A);
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(B);
+ if (i915_sw_fence_done(B)) {
+ pr_err("Fence B completed early\n");
+ goto err_C;
+ }
+
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early (after signaling B)\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(C);
+
+ ret = 0;
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_AB_C(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret = -EINVAL;
+
+ /* Test multiple fences (AB) waiting on a single event (C) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+
+ ret = 0;
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early\n");
+ ret = -EINVAL;
+ }
+
+ if (i915_sw_fence_done(B)) {
+ pr_err("Fence B completed early\n");
+ ret = -EINVAL;
+ }
+
+ i915_sw_fence_commit(C);
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_C_AB(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret;
+
+ /* Test multiple event sources (A,B) for a single fence (C) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = 0;
+ i915_sw_fence_commit(C);
+ if (i915_sw_fence_done(C))
+ ret = -EINVAL;
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_chain(void *arg)
+{
+ int nfences = 4096;
+ struct i915_sw_fence **fences;
+ int ret, i;
+
+ /* Test a long chain of fences */
+ fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ for (i = 0; i < nfences; i++) {
+ fences[i] = alloc_fence();
+ if (!fences[i]) {
+ nfences = i;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (i > 0) {
+ ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
+ fences[i - 1],
+ GFP_KERNEL);
+ if (ret < 0) {
+ nfences = i + 1;
+ goto err;
+ }
+
+ i915_sw_fence_commit(fences[i]);
+ }
+ }
+
+ ret = 0;
+ for (i = nfences; --i; ) {
+ if (i915_sw_fence_done(fences[i])) {
+ if (ret == 0)
+ pr_err("Fence[%d] completed early\n", i);
+ ret = -EINVAL;
+ }
+ }
+ i915_sw_fence_commit(fences[0]);
+ for (i = 0; ret == 0 && i < nfences; i++) {
+ if (!i915_sw_fence_done(fences[i])) {
+ pr_err("Fence[%d] is not done\n", i);
+ ret = -EINVAL;
+ }
+ }
+
+err:
+ for (i = 0; i < nfences; i++)
+ free_fence(fences[i]);
+ kfree(fences);
+ return ret;
+}
+
+struct task_ipc {
+ struct work_struct work;
+ struct completion started;
+ struct i915_sw_fence *in, *out;
+ int value;
+};
+
+static void task_ipc(struct work_struct *work)
+{
+ struct task_ipc *ipc = container_of(work, typeof(*ipc), work);
+
+ complete(&ipc->started);
+
+ i915_sw_fence_wait(ipc->in);
+ smp_store_mb(ipc->value, 1);
+ i915_sw_fence_commit(ipc->out);
+}
+
+static int test_ipc(void *arg)
+{
+ struct task_ipc ipc;
+ int ret = 0;
+
+ /* Test use of i915_sw_fence as an interprocess signaling mechanism */
+ ipc.in = alloc_fence();
+ if (!ipc.in)
+ return -ENOMEM;
+ ipc.out = alloc_fence();
+ if (!ipc.out) {
+ ret = -ENOMEM;
+ goto err_in;
+ }
+
+ /* use a completion to avoid chicken-and-egg testing */
+ init_completion(&ipc.started);
+
+ ipc.value = 0;
+ INIT_WORK_ONSTACK(&ipc.work, task_ipc);
+ schedule_work(&ipc.work);
+
+ wait_for_completion(&ipc.started);
+
+ usleep_range(1000, 2000);
+ if (READ_ONCE(ipc.value)) {
+ pr_err("worker updated value before i915_sw_fence was signaled\n");
+ ret = -EINVAL;
+ }
+
+ i915_sw_fence_commit(ipc.in);
+ i915_sw_fence_wait(ipc.out);
+
+ if (!READ_ONCE(ipc.value)) {
+ pr_err("worker signaled i915_sw_fence before value was posted\n");
+ ret = -EINVAL;
+ }
+
+ flush_work(&ipc.work);
+ destroy_work_on_stack(&ipc.work);
+ free_fence(ipc.out);
+err_in:
+ free_fence(ipc.in);
+ return ret;
+}
+
+static int test_timer(void *arg)
+{
+ unsigned long target, delay;
+ struct timed_fence tf;
+
+ preempt_disable();
+ timed_fence_init(&tf, target = jiffies);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with immediate expiration not signaled\n");
+ goto err;
+ }
+ preempt_enable();
+ timed_fence_fini(&tf);
+
+ for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ preempt_disable();
+ timed_fence_init(&tf, target = jiffies + delay);
+ if (i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
+ goto err;
+ }
+ preempt_enable();
+
+ i915_sw_fence_wait(&tf.fence);
+
+ preempt_disable();
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence not signaled after wait\n");
+ goto err;
+ }
+ if (time_before(jiffies, target)) {
+ pr_err("Fence signaled too early, target=%lu, now=%lu\n",
+ target, jiffies);
+ goto err;
+ }
+ preempt_enable();
+ timed_fence_fini(&tf);
+ }
+
+ return 0;
+
+err:
+ preempt_enable();
+ timed_fence_fini(&tf);
+ return -EINVAL;
+}
+
+static const char *mock_name(struct dma_fence *fence)
+{
+ return "mock";
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static DEFINE_SPINLOCK(mock_fence_lock);
+
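+/* A minimal dma_fence with stub ops; the test signals it manually. */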
+static struct dma_fence *alloc_dma_fence(void)
+{
+ struct dma_fence *dma;
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (dma)
+ dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);
+
+ return dma;
+}
+
+static struct i915_sw_fence *
+wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
+{
+ struct i915_sw_fence *fence;
+ int err;
+
+ fence = alloc_fence();
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
+ i915_sw_fence_commit(fence);
+ if (err < 0) {
+ free_fence(fence);
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static int test_dma_fence(void *arg)
+{
+ struct i915_sw_fence *timeout = NULL, *not = NULL;
+ unsigned long delay = i915_selftest.timeout_jiffies;
+ unsigned long end, sleep;
+ struct dma_fence *dma;
+ int err;
+
+ dma = alloc_dma_fence();
+ if (!dma)
+ return -ENOMEM;
+
+ timeout = wrap_dma_fence(dma, delay);
+ if (IS_ERR(timeout)) {
+ err = PTR_ERR(timeout);
+ goto err;
+ }
+
+ not = wrap_dma_fence(dma, 0);
+ if (IS_ERR(not)) {
+ err = PTR_ERR(not);
+ goto err;
+ }
+
+ err = -EINVAL;
+ if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
+ pr_err("Fences immediately signaled\n");
+ goto err;
+ }
+
+ /* We round the timeout for the fence up to the next second */
+ end = round_jiffies_up(jiffies + delay);
+
+ sleep = jiffies_to_usecs(delay) / 3;
+ usleep_range(sleep, 2 * sleep);
+ if (time_after(jiffies, end)) {
+ pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
+ delay, end, jiffies);
+ goto skip;
+ }
+
+ if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
+ pr_err("Fences signaled too early\n");
+ goto err;
+ }
+
+ if (!wait_event_timeout(timeout->wait,
+ i915_sw_fence_done(timeout),
+ 2 * (end - jiffies) + 1)) {
+ pr_err("Timeout fence unsignaled!\n");
+ goto err;
+ }
+
+ if (i915_sw_fence_done(not)) {
+ pr_err("No timeout fence signaled!\n");
+ goto err;
+ }
+
+skip:
+ dma_fence_signal(dma);
+
+ if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
+ pr_err("Fences unsignaled\n");
+ goto err;
+ }
+
+ free_fence(not);
+ free_fence(timeout);
+ dma_fence_put(dma);
+
+ return 0;
+
+err:
+ dma_fence_signal(dma);
+ if (!IS_ERR_OR_NULL(timeout))
+ free_fence(timeout);
+ if (!IS_ERR_OR_NULL(not))
+ free_fence(not);
+ dma_fence_put(dma);
+ return err;
+}
+
+int i915_sw_fence_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(test_self),
+ SUBTEST(test_dag),
+ SUBTEST(test_AB),
+ SUBTEST(test_ABC),
+ SUBTEST(test_AB_C),
+ SUBTEST(test_C_AB),
+ SUBTEST(test_chain),
+ SUBTEST(test_ipc),
+ SUBTEST(test_timer),
+ SUBTEST(test_dma_fence),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
new file mode 100644
index 000000000..47f4ae18a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
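+/* Recursively pretty-print the syncmap tree, one node per line. */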
+static char *
+__sync_print(struct i915_syncmap *p,
+ char *buf, unsigned long *sz,
+ unsigned int depth,
+ unsigned int last,
+ unsigned int idx)
+{
+ unsigned long len;
+ unsigned int i, X;
+
+ if (depth) {
+ unsigned int d;
+
+ for (d = 0; d < depth - 1; d++) {
+ if (last & BIT(depth - d - 1))
+ len = scnprintf(buf, *sz, "| ");
+ else
+ len = scnprintf(buf, *sz, " ");
+ buf += len;
+ *sz -= len;
+ }
+ len = scnprintf(buf, *sz, "%x-> ", idx);
+ buf += len;
+ *sz -= len;
+ }
+
+ /* We mark bits after the prefix as "X" */
+ len = scnprintf(buf, *sz, "0x%016llx", p->prefix << p->height << SHIFT);
+ buf += len;
+ *sz -= len;
+ X = (p->height + SHIFT) / 4;
+ scnprintf(buf - X, *sz + X, "%*s", X, "XXXXXXXXXXXXXXXXX");
+
+ if (!p->height) {
+ for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
+ len = scnprintf(buf, *sz, " %x:%x,",
+ i, __sync_seqno(p)[i]);
+ buf += len;
+ *sz -= len;
+ }
+ buf -= 1;
+ *sz += 1;
+ }
+
+ len = scnprintf(buf, *sz, "\n");
+ buf += len;
+ *sz -= len;
+
+ if (p->height) {
+ for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
+ buf = __sync_print(__sync_child(p)[i], buf, sz,
+ depth + 1,
+ last << 1 | !!(p->bitmap >> (i + 1)),
+ i);
+ }
+ }
+
+ return buf;
+}
+
+static bool
+i915_syncmap_print_to_buf(struct i915_syncmap *p, char *buf, unsigned long sz)
+{
+ if (!p)
+ return false;
+
+ while (p->parent)
+ p = p->parent;
+
+ __sync_print(p, buf, &sz, 0, 1, 0);
+ return true;
+}
+
+static int check_syncmap_free(struct i915_syncmap **sync)
+{
+ i915_syncmap_free(sync);
+ if (*sync) {
+ pr_err("sync not cleared after free\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dump_syncmap(struct i915_syncmap *sync, int err)
+{
+ char *buf;
+
+ if (!err)
+ return check_syncmap_free(&sync);
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto skip;
+
+ if (i915_syncmap_print_to_buf(sync, buf, PAGE_SIZE))
+ pr_err("%s", buf);
+
+ kfree(buf);
+
+skip:
+ i915_syncmap_free(&sync);
+ return err;
+}
+
+static int igt_syncmap_init(void *arg)
+{
+ struct i915_syncmap *sync = (void *)~0ul;
+
+ /*
+ * Cursory check that we can initialise a random pointer and transform
+ * it into the root pointer of a syncmap.
+ */
+
+ i915_syncmap_init(&sync);
+ return check_syncmap_free(&sync);
+}
+
+static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)
+{
+ if (leaf->height) {
+ pr_err("%s: not a leaf, height is %d\n",
+ __func__, leaf->height);
+ return -EINVAL;
+ }
+
+ if (__sync_seqno(leaf)[idx] != seqno) {
+ pr_err("%s: seqno[%d], found %x, expected %x\n",
+ __func__, idx, __sync_seqno(leaf)[idx], seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno)
+{
+ int err;
+
+ err = i915_syncmap_set(sync, context, seqno);
+ if (err)
+ return err;
+
+ if ((*sync)->height) {
+ pr_err("Inserting first context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, (*sync)->height, (*sync)->prefix);
+ return -EINVAL;
+ }
+
+ if ((*sync)->parent) {
+ pr_err("Inserting first context=%llx created branches!\n",
+ context);
+ return -EINVAL;
+ }
+
+ if (hweight32((*sync)->bitmap) != 1) {
+ pr_err("First bitmap does not contain a single entry, found %x (count=%d)!\n",
+ (*sync)->bitmap, hweight32((*sync)->bitmap));
+ return -EINVAL;
+ }
+
+ err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);
+ if (err)
+ return err;
+
+ if (!i915_syncmap_is_later(sync, context, seqno)) {
+ pr_err("Lookup of first context=%llx/seqno=%x failed!\n",
+ context, seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_syncmap_one(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ unsigned long max = 1;
+ int err;
+
+ /*
+	 * Check that inserting a new id creates a leaf and only that leaf.
+ */
+
+ i915_syncmap_init(&sync);
+
+ do {
+ u64 context = i915_prandom_u64_state(&prng);
+ unsigned long loop;
+
+ err = check_syncmap_free(&sync);
+ if (err)
+ goto out;
+
+ for (loop = 0; loop <= max; loop++) {
+ err = check_one(&sync, context,
+ prandom_u32_state(&prng));
+ if (err)
+ goto out;
+ }
+ max++;
+ } while (!__igt_timeout(end_time, NULL));
+ pr_debug("%s: Completed %lu single insertions\n",
+ __func__, max * (max - 1) / 2);
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno)
+{
+ int err;
+
+ err = i915_syncmap_set(sync, context, seqno);
+ if (err)
+ return err;
+
+ if ((*sync)->height) {
+ pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, (*sync)->height, (*sync)->prefix);
+ return -EINVAL;
+ }
+
+ if (hweight32((*sync)->bitmap) != 1) {
+ pr_err("First entry into leaf (context=%llx) does not contain a single entry, found %x (count=%d)!\n",
+ context, (*sync)->bitmap, hweight32((*sync)->bitmap));
+ return -EINVAL;
+ }
+
+ err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);
+ if (err)
+ return err;
+
+ if (!i915_syncmap_is_later(sync, context, seqno)) {
+ pr_err("Lookup of first entry context=%llx/seqno=%x failed!\n",
+ context, seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_syncmap_join_above(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int pass, order;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * When we have a new id that doesn't fit inside the existing tree,
+ * we need to add a new layer above.
+ *
+ * 1: 0x00000001
+ * 2: 0x00000010
+ * 3: 0x00000100
+ * 4: 0x00001000
+ * ...
+	 * On each pass the common prefix shrinks and we have to insert a join.
+ * Each join will only contain two branches, the latest of which
+ * is always a leaf.
+ *
+ * If we then reuse the same set of contexts, we expect to build an
+ * identical tree.
+ */
+ for (pass = 0; pass < 3; pass++) {
+ for (order = 0; order < 64; order += SHIFT) {
+ u64 context = BIT_ULL(order);
+ struct i915_syncmap *join;
+
+ err = check_leaf(&sync, context, 0);
+ if (err)
+ goto out;
+
+ join = sync->parent;
+ if (!join) /* very first insert will have no parents */
+ continue;
+
+ if (!join->height) {
+ pr_err("Parent with no height!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (hweight32(join->bitmap) != 2) {
+ pr_err("Join does not have 2 children: %x (%d)\n",
+ join->bitmap, hweight32(join->bitmap));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (__sync_child(join)[__sync_branch_idx(join, context)] != sync) {
+ pr_err("Leaf misplaced in parent!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_join_below(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int step, order, idx;
+ int err = -ENODEV;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * Check that we can split a compacted branch by replacing it with
+ * a join.
+ */
+ for (step = 0; step < KSYNCMAP; step++) {
+ for (order = 64 - SHIFT; order > 0; order -= SHIFT) {
+ u64 context = step * BIT_ULL(order);
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx (order=%d, step=%d) did not return leaf (height=%d, prefix=%llx\n",
+ context, order, step, sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ for (step = 0; step < KSYNCMAP; step++) {
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ u64 context = step * BIT_ULL(order);
+
+ if (!i915_syncmap_is_later(&sync, context, 0)) {
+ pr_err("1: context %llx (order=%d, step=%d) not found\n",
+ context, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (idx = 1; idx < KSYNCMAP; idx++) {
+ if (i915_syncmap_is_later(&sync, context + idx, 0)) {
+ pr_err("1: context %llx (order=%d, step=%d) should not exist\n",
+ context + idx, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ }
+
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ for (step = 0; step < KSYNCMAP; step++) {
+ u64 context = step * BIT_ULL(order);
+
+ if (!i915_syncmap_is_later(&sync, context, 0)) {
+ pr_err("2: context %llx (order=%d, step=%d) not found\n",
+ context, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_neighbours(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ int err = -ENODEV;
+
+ /*
+ * Each leaf holds KSYNCMAP seqno. Check that when we create KSYNCMAP
+ * neighbouring ids, they all fit into the same leaf.
+ */
+
+ i915_syncmap_init(&sync);
+ do {
+ u64 context = i915_prandom_u64_state(&prng) & ~MASK;
+ unsigned int idx;
+
+ if (i915_syncmap_is_later(&sync, context, 0)) /* Skip repeats */
+ continue;
+
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ err = i915_syncmap_set(&sync, context + idx, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->bitmap != BIT(idx + 1) - 1) {
+ pr_err("Inserting neighbouring context=0x%llx+%d, did not fit into the same leaf bitmap=%x (%d), expected %lx (%d)\n",
+ context, idx,
+ sync->bitmap, hweight32(sync->bitmap),
+ BIT(idx + 1) - 1, idx + 1);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ } while (!__igt_timeout(end_time, NULL));
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_compact(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int idx, order;
+ int err = -ENODEV;
+
+ i915_syncmap_init(&sync);
+
+ /*
+	 * A syncmap is a "space efficient" compressed radix tree - any
+ * branch with only one child is skipped and replaced by the child.
+ *
+ * If we construct a tree with ids that are neighbouring at a non-zero
+ * height, we form a join but each child of that join is directly a
+ * leaf holding the single id.
+ */
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ err = check_syncmap_free(&sync);
+ if (err)
+ goto out;
+
+ /* Create neighbours in the parent */
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ u64 context = idx * BIT_ULL(order) + idx;
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx (order=%d, idx=%d) did not return leaf (height=%d, prefix=%llx\n",
+ context, order, idx,
+ sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ sync = sync->parent;
+ if (sync->parent) {
+ pr_err("Parent (join) of last leaf was not the sync!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->height != order) {
+ pr_err("Join does not have the expected height, found %d, expected %d\n",
+ sync->height, order);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->bitmap != BIT(KSYNCMAP) - 1) {
+ pr_err("Join is not full!, found %x (%d) expected %lx (%d)\n",
+ sync->bitmap, hweight32(sync->bitmap),
+ BIT(KSYNCMAP) - 1, KSYNCMAP);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Each of our children should be a leaf */
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ struct i915_syncmap *leaf = __sync_child(sync)[idx];
+
+ if (leaf->height) {
+ pr_err("Child %d is a not leaf!\n", idx);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (leaf->parent != sync) {
+ pr_err("Child %d is not attached to us!\n",
+ idx);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!is_power_of_2(leaf->bitmap)) {
+ pr_err("Child %d holds more than one id, found %x (%d)\n",
+ idx, leaf->bitmap, hweight32(leaf->bitmap));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (leaf->bitmap != BIT(idx)) {
+ pr_err("Child %d has wrong seqno idx, found %d, expected %d\n",
+ idx, ilog2(leaf->bitmap), idx);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_random(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ unsigned long count, phase, i;
+ u32 seqno;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * Having tried to test the individual operations within i915_syncmap,
+ * run a smoketest exploring the entire u64 space with random
+ * insertions.
+ */
+
+ count = 0;
+ phase = jiffies + HZ/100 + 1;
+ do {
+ u64 context = i915_prandom_u64_state(&prng);
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ count++;
+ } while (!time_after(jiffies, phase));
+ seqno = 0;
+
+ phase = 0;
+ do {
+ I915_RND_STATE(ctx);
+ u32 last_seqno = seqno;
+ bool expect;
+
+ seqno = prandom_u32_state(&prng);
+ expect = seqno_later(last_seqno, seqno);
+
+ for (i = 0; i < count; i++) {
+ u64 context = i915_prandom_u64_state(&ctx);
+
+ if (i915_syncmap_is_later(&sync, context, seqno) != expect) {
+ pr_err("context=%llu, last=%u this=%u did not match expectation (%d)\n",
+ context, last_seqno, seqno, expect);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = i915_syncmap_set(&sync, context, seqno);
+ if (err)
+ goto out;
+ }
+
+ phase++;
+ } while (!__igt_timeout(end_time, NULL));
+ pr_debug("Completed %lu passes, each of %lu contexts\n", phase, count);
+out:
+ return dump_syncmap(sync, err);
+}
+
+int i915_syncmap_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_syncmap_init),
+ SUBTEST(igt_syncmap_one),
+ SUBTEST(igt_syncmap_join_above),
+ SUBTEST(igt_syncmap_join_below),
+ SUBTEST(igt_syncmap_neighbours),
+ SUBTEST(igt_syncmap_compact),
+ SUBTEST(igt_syncmap_random),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
new file mode 100644
index 000000000..71b52d5ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -0,0 +1,1107 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_scatterlist.h"
+#include "i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "mock_gtt.h"
+
+static bool assert_vma(struct i915_vma *vma,
+ struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx)
+{
+ bool ok = true;
+
+ if (vma->vm != ctx->vm) {
+ pr_err("VMA created with wrong VM\n");
+ ok = false;
+ }
+
+ if (vma->size != obj->base.size) {
+ pr_err("VMA created with wrong size, found %llu, expected %zu\n",
+ vma->size, obj->base.size);
+ ok = false;
+ }
+
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
+ pr_err("VMA created with wrong type [%d]\n",
+ vma->gtt_view.type);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static struct i915_vma *
+checked_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_gtt_view *view)
+{
+ struct i915_vma *vma;
+ bool ok = true;
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Manual checks, will be reinforced by i915_vma_compare! */
+ if (vma->vm != vm) {
+ pr_err("VMA's vm [%p] does not match request [%p]\n",
+ vma->vm, vm);
+ ok = false;
+ }
+
+ if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
+ pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
+ i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
+ ok = false;
+ }
+
+ if (i915_vma_compare(vma, vm, view)) {
+ pr_err("i915_vma_compare failed with create parameters!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (i915_vma_compare(vma, vma->vm,
+ i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {
+ pr_err("i915_vma_compare failed with itself\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ok) {
+ pr_err("i915_vma_compare failed to detect the difference!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vma;
+}
+
+static int create_vmas(struct drm_i915_private *i915,
+ struct list_head *objects,
+ struct list_head *contexts)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ int pinned;
+
+ list_for_each_entry(obj, objects, st_link) {
+ for (pinned = 0; pinned <= 1; pinned++) {
+ list_for_each_entry(ctx, contexts, link) {
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ int err;
+
+ vm = i915_gem_context_get_eb_vm(ctx);
+ vma = checked_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ if (!assert_vma(vma, obj, ctx)) {
+ pr_err("VMA lookup/create failed\n");
+ return -EINVAL;
+ }
+
+ if (!pinned) {
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ pr_err("Failed to pin VMA\n");
+ return err;
+ }
+ } else {
+ i915_vma_unpin(vma);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int igt_vma_create(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct drm_i915_gem_object *obj, *on;
+ struct i915_gem_context *ctx, *cn;
+ unsigned long num_obj, num_ctx;
+ unsigned long no, nc;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(contexts);
+ LIST_HEAD(objects);
+ int err = -ENOMEM;
+
+	/* Exercise creating many VMAs amongst many objects, checking the
+ * vma creation and lookup routines.
+ */
+
+ no = 0;
+ for_each_prime_number(num_obj, ULONG_MAX - 1) {
+ for (; no < num_obj; no++) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ list_add(&obj->st_link, &objects);
+ }
+
+ nc = 0;
+ for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) {
+ for (; nc < num_ctx; nc++) {
+ ctx = mock_context(i915, "mock");
+ if (!ctx)
+ goto out;
+
+ list_move(&ctx->link, &contexts);
+ }
+
+ err = create_vmas(i915, &objects, &contexts);
+ if (err)
+ goto out;
+
+ if (igt_timeout(end_time,
+ "%s timed out: after %lu objects in %lu contexts\n",
+ __func__, no, nc))
+ goto end;
+ }
+
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
+ mock_context_close(ctx);
+ }
+
+ cond_resched();
+ }
+
+end:
+ /* Final pass to lookup all created contexts */
+ err = create_vmas(i915, &objects, &contexts);
+out:
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
+ mock_context_close(ctx);
+ }
+
+ list_for_each_entry_safe(obj, on, &objects, st_link)
+ i915_gem_object_put(obj);
+ return err;
+}
+
+struct pin_mode {
+ u64 size;
+ u64 flags;
+ bool (*assert)(const struct i915_vma *,
+ const struct pin_mode *mode,
+ int result);
+ const char *string;
+};
+
+static bool assert_pin_valid(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ if (result)
+ return false;
+
+ if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
+ return false;
+
+ return true;
+}
+
+__maybe_unused
+static bool assert_pin_enospc(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -ENOSPC;
+}
+
+__maybe_unused
+static bool assert_pin_einval(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -EINVAL;
+}
+
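+/*
+ * Table-driven pinning checks: each pin_mode pairs a size/flags request
+ * with the assertion its i915_vma_pin() result must satisfy, i.e. a
+ * correctly placed binding, -EINVAL or -ENOSPC.
+ */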
+static int igt_vma_pin1(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ const struct pin_mode modes[] = {
+#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
+#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
+#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
+#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
+ VALID(0, PIN_GLOBAL),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE),
+
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
+
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
+
+ VALID(4096, PIN_GLOBAL),
+ VALID(8192, PIN_GLOBAL),
+ VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+ NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->vm.total - 4096, PIN_GLOBAL),
+ VALID(ggtt->vm.total, PIN_GLOBAL),
+ NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL),
+ NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
+ INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
+
+ VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+
+#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ /* Misusing BIAS is a programming error (it is not controllable
+ * from userspace) so when debugging is enabled, it explodes.
+ * However, the tests are still quite interesting for checking
+ * variable start, end and size.
+ */
+ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total),
+ NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
+#endif
+ { },
+#undef NOSPACE
+#undef INVALID
+#undef __INVALID
+#undef VALID
+ }, *m;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err = -EINVAL;
+
+ /* Exercise all the weird and wonderful i915_vma_pin requests,
+ * focusing on error handling of boundary conditions.
+ */
+
+ GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
+
+ obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = checked_vma_instance(obj, &ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ goto out;
+
+ for (m = modes; m->assert; m++) {
+ err = i915_vma_pin(vma, m->size, 0, m->flags);
+ if (!m->assert(vma, m, err)) {
+ pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
+ m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
+ (int)(m - modes), m->string, m->size, m->flags,
+ err);
+ if (!err)
+ i915_vma_unpin(vma);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!err) {
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
+ goto out;
+ }
+ }
+
+ cond_resched();
+ }
+
+ err = 0;
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
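+/*
+ * Map coordinate (x, y) in plane @n of a 90° rotated view back to the
+ * page index within the source object; columns are walked bottom-up,
+ * hence the (height - y - 1) term.
+ */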
+static unsigned long rotated_index(const struct intel_rotation_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].src_stride * (r->plane[n].height - y - 1) +
+ r->plane[n].offset + x);
+}
+
+static struct scatterlist *
+assert_rotated(struct drm_i915_gem_object *obj,
+ const struct intel_rotation_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+
+ for (x = 0; x < r->plane[n].width; x++) {
+ unsigned int left;
+
+ for (y = 0; y < r->plane[n].height; y++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ src_idx = rotated_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (sg_dma_len(sg) != PAGE_SIZE) {
+ pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != src) {
+ pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ }
+
+ left = (r->plane[n].dst_stride - y) * PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_len(sg) != left) {
+ pr_err("Invalid sg.length, found %d, expected %u for rotated page (%d, %d)\n",
+ sg_dma_len(sg), left, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != 0) {
+ pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+ &sg_dma_address(sg), x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ }
+
+ return sg;
+}
+
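+/*
+ * For a remapped view the pages keep their row-major order, so the
+ * source page index is the plain stride * y + x walk.
+ */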
+static unsigned long remapped_index(const struct intel_remapped_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].src_stride * y +
+ r->plane[n].offset + x);
+}
+
+static struct scatterlist *
+assert_remapped(struct drm_i915_gem_object *obj,
+ const struct intel_remapped_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+ unsigned int left = 0;
+ unsigned int offset;
+
+ for (y = 0; y < r->plane[n].height; y++) {
+ for (x = 0; x < r->plane[n].width; x++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+ if (!left) {
+ offset = 0;
+ left = sg_dma_len(sg);
+ }
+
+ src_idx = remapped_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (left < PAGE_SIZE || left & (PAGE_SIZE-1)) {
+ pr_err("Invalid sg.length, found %d, expected %lu for remapped page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) + offset != src) {
+ pr_err("Invalid address for remapped page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ left -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+
+ if (!left)
+ sg = sg_next(sg);
+ }
+
+ if (left) {
+ pr_err("Unexpected sg tail with %d size for remapped page (%d, %d)\n",
+ left,
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ left = (r->plane[n].dst_stride - r->plane[n].width) * PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_len(sg) != left) {
+ pr_err("Invalid sg.length, found %u, expected %u for remapped page (%d, %d)\n",
+ sg_dma_len(sg), left,
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != 0) {
+ pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+ &sg_dma_address(sg),
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ left = 0;
+ }
+
+ return sg;
+}
+
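+/*
+ * Expected page count for the view: a rotated view is laid out column by
+ * column (dst_stride pages per column, width columns), a remapped view
+ * row by row (dst_stride pages per row, height rows).
+ */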
+static unsigned int remapped_size(enum i915_gtt_view_type view_type,
+ const struct intel_remapped_plane_info *a,
+ const struct intel_remapped_plane_info *b)
+{
+ if (view_type == I915_GTT_VIEW_ROTATED)
+ return a->dst_stride * a->width + b->dst_stride * b->width;
+ else
+ return a->dst_stride * a->height + b->dst_stride * b->height;
+}
+
+static int igt_vma_rotate_remap(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_gem_object *obj;
+ const struct intel_remapped_plane_info planes[] = {
+ { .width = 1, .height = 1, .src_stride = 1 },
+ { .width = 2, .height = 2, .src_stride = 2 },
+ { .width = 4, .height = 4, .src_stride = 4 },
+ { .width = 8, .height = 8, .src_stride = 8 },
+
+ { .width = 3, .height = 5, .src_stride = 3 },
+ { .width = 3, .height = 5, .src_stride = 4 },
+ { .width = 3, .height = 5, .src_stride = 5 },
+
+ { .width = 5, .height = 3, .src_stride = 5 },
+ { .width = 5, .height = 3, .src_stride = 7 },
+ { .width = 5, .height = 3, .src_stride = 9 },
+
+ { .width = 4, .height = 6, .src_stride = 6 },
+ { .width = 6, .height = 4, .src_stride = 6 },
+
+ { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+ { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+ { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
+
+ { }
+ }, *a, *b;
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
+ const unsigned int max_pages = 64;
+ int err = -ENOMEM;
+
+	/* Create VMAs for many different combinations of planes and check
+	 * that the page layout within the rotated/remapped VMA matches our
+	 * expectations.
+	 */
+
+ obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (t = types; *t; t++) {
+ for (a = planes; a->width; a++) {
+ for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
+ struct i915_gtt_view view = {
+ .type = *t,
+ .remapped.plane[0] = *a,
+ .remapped.plane[1] = *b,
+ };
+ struct intel_remapped_plane_info *plane_info = view.remapped.plane;
+ unsigned int n, max_offset;
+
+ max_offset = max(plane_info[0].src_stride * plane_info[0].height,
+ plane_info[1].src_stride * plane_info[1].height);
+ GEM_BUG_ON(max_offset > max_pages);
+ max_offset = max_pages - max_offset;
+
+ if (!plane_info[0].dst_stride)
+ plane_info[0].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
+ plane_info[0].height :
+ plane_info[0].width;
+ if (!plane_info[1].dst_stride)
+ plane_info[1].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
+ plane_info[1].height :
+ plane_info[1].width;
+
+ for_each_prime_number_from(plane_info[0].offset, 0, max_offset) {
+ for_each_prime_number_from(plane_info[1].offset, 0, max_offset) {
+ struct scatterlist *sg;
+ struct i915_vma *vma;
+ unsigned int expected_pages;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ pr_err("Failed to pin VMA, err=%d\n", err);
+ goto out_object;
+ }
+
+ expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
+
+ if (view.type == I915_GTT_VIEW_ROTATED &&
+ vma->size != expected_pages * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * expected_pages, vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (view.type == I915_GTT_VIEW_REMAPPED &&
+ vma->size > expected_pages * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * expected_pages, vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages->nents > expected_pages) {
+							pr_err("sg table is wrong size, expected %u, found %u nents\n",
+							       expected_pages, vma->pages->nents);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("VMA binding too small, expected %llu, found %llu\n",
+ vma->size, vma->node.size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages == obj->mm.pages) {
+ pr_err("VMA using unrotated object pages!\n");
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ sg = vma->pages->sgl;
+ for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
+ if (view.type == I915_GTT_VIEW_ROTATED)
+ sg = assert_rotated(obj, &view.rotated, n, sg);
+ else
+ sg = assert_remapped(obj, &view.remapped, n, sg);
+ if (IS_ERR(sg)) {
+ pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
+ view.type == I915_GTT_VIEW_ROTATED ?
+ "rotated" : "remapped", n,
+ plane_info[0].width,
+ plane_info[0].height,
+ plane_info[0].src_stride,
+ plane_info[0].dst_stride,
+ plane_info[0].offset,
+ plane_info[1].width,
+ plane_info[1].height,
+ plane_info[1].src_stride,
+ plane_info[1].dst_stride,
+ plane_info[1].offset);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
+ cond_resched();
+ }
+ }
+ }
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
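+/*
+ * Check that the partial view's sg list maps exactly the source pages
+ * [offset, offset + size), in order.
+ */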
+static bool assert_partial(struct drm_i915_gem_object *obj,
+ struct i915_vma *vma,
+ unsigned long offset,
+ unsigned long size)
+{
+ struct sgt_iter sgt;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, sgt, vma->pages) {
+ dma_addr_t src;
+
+ if (!size) {
+ pr_err("Partial scattergather list too long\n");
+ return false;
+ }
+
+ src = i915_gem_object_get_dma_address(obj, offset);
+ if (src != dma) {
+ pr_err("DMA mismatch for partial page offset %lu\n",
+ offset);
+ return false;
+ }
+
+ offset++;
+ size--;
+ }
+
+ return true;
+}
+
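+/*
+ * Post-pin sanity checks: the vma must span @size bytes inside a node at
+ * least that large, and a non-normal view must carry its own page
+ * arrangement rather than reusing the object's backing pages.
+ */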
+static bool assert_pin(struct i915_vma *vma,
+ struct i915_gtt_view *view,
+ u64 size,
+ const char *name)
+{
+ bool ok = true;
+
+ if (vma->size != size) {
+ pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
+ name, size, vma->size);
+ ok = false;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
+ name, vma->size, vma->node.size);
+ ok = false;
+ }
+
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ if (memcmp(&vma->gtt_view, view, sizeof(*view))) {
+ pr_err("(%s) VMA mismatch upon creation!\n",
+ name);
+ ok = false;
+ }
+
+ if (vma->pages == vma->obj->mm.pages) {
+ pr_err("(%s) VMA using original object pages!\n",
+ name);
+ ok = false;
+ }
+ } else {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
+ pr_err("Not the normal ggtt view! Found %d\n",
+ vma->gtt_view.type);
+ ok = false;
+ }
+
+ if (vma->pages != vma->obj->mm.pages) {
+ pr_err("VMA not using object pages!\n");
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+
+static int igt_vma_partial(void *arg)
+{
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
+ const unsigned int npages = 1021; /* prime! */
+ struct drm_i915_gem_object *obj;
+ const struct phase {
+ const char *name;
+ } phases[] = {
+ { "create" },
+ { "lookup" },
+ { },
+ }, *p;
+ unsigned int sz, offset;
+ struct i915_vma *vma;
+ int err = -ENOMEM;
+
+	/* Create lots of different VMAs for the object and check that
+	 * we are returned the same VMA when we later request the same range.
+	 */
+
+ obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (p = phases; p->name; p++) { /* exercise both create/lookup */
+ unsigned int count, nvma;
+
+ nvma = 0;
+ for_each_prime_number_from(sz, 1, npages) {
+ for_each_prime_number_from(offset, 0, npages - sz) {
+ struct i915_gtt_view view;
+
+ view.type = I915_GTT_VIEW_PARTIAL;
+ view.partial.offset = offset;
+ view.partial.size = sz;
+
+ if (sz == npages)
+ view.type = I915_GTT_VIEW_NORMAL;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
+ pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (!assert_partial(obj, vma, offset, sz)) {
+ pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+ nvma++;
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
+
+ cond_resched();
+ }
+ }
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
+ count++;
+ if (count != nvma) {
+			pr_err("(%s) Not all partial vmas were recorded on the obj->vma.list: found %u, expected %u\n",
+			       p->name, count, nvma);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ /* Check that we did create the whole object mapping */
+ vma = checked_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
+ pr_err("(%s) inconsistent full pin\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+
+ err = i915_vma_unbind_unlocked(vma);
+ if (err) {
+ pr_err("Unbinding returned %i\n", err);
+ goto out_object;
+ }
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
+ count++;
+ if (count != nvma) {
+ pr_err("(%s) allocated an extra full vma!\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+int i915_vma_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_create),
+ SUBTEST(igt_vma_pin1),
+ SUBTEST(igt_vma_rotate_remap),
+ SUBTEST(igt_vma_partial),
+ };
+ struct drm_i915_private *i915;
+ struct intel_gt *gt;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ /* allocate the ggtt */
+ err = intel_gt_assign_ggtt(to_gt(i915));
+ if (err)
+ goto out_put;
+
+ gt = to_gt(i915);
+
+ mock_init_ggtt(gt);
+
+ err = i915_subtests(tests, gt->ggtt);
+
+ mock_device_flush(i915);
+ i915_gem_drain_freed_objects(i915);
+ mock_fini_ggtt(gt->ggtt);
+
+out_put:
+ mock_destroy_device(i915);
+ return err;
+}
+
+static int igt_vma_remapped_gtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct intel_remapped_plane_info planes[] = {
+ { .width = 1, .height = 1, .src_stride = 1 },
+ { .width = 2, .height = 2, .src_stride = 2 },
+ { .width = 4, .height = 4, .src_stride = 4 },
+ { .width = 8, .height = 8, .src_stride = 8 },
+
+ { .width = 3, .height = 5, .src_stride = 3 },
+ { .width = 3, .height = 5, .src_stride = 4 },
+ { .width = 3, .height = 5, .src_stride = 5 },
+
+ { .width = 5, .height = 3, .src_stride = 5 },
+ { .width = 5, .height = 3, .src_stride = 7 },
+ { .width = 5, .height = 3, .src_stride = 9 },
+
+ { .width = 4, .height = 6, .src_stride = 6 },
+ { .width = 6, .height = 4, .src_stride = 6 },
+
+ { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+ { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+ { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
+
+ { }
+ }, *p;
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
+ return 0;
+
+ obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ for (t = types; *t; t++) {
+ for (p = planes; p->width; p++) {
+ struct i915_gtt_view view = {
+ .type = *t,
+ .rotated.plane[0] = *p,
+ };
+ struct intel_remapped_plane_info *plane_info = view.rotated.plane;
+ struct i915_vma *vma;
+ u32 __iomem *map;
+ unsigned int x, y;
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out;
+
+ if (!plane_info[0].dst_stride)
+ plane_info[0].dst_stride = *t == I915_GTT_VIEW_ROTATED ?
+ p->height : p->width;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->gtt_view.type != *t);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0 ; y < plane_info[0].height; y++) {
+ for (x = 0 ; x < plane_info[0].width; x++) {
+ unsigned int offset;
+ u32 val = y << 16 | x;
+
+ if (*t == I915_GTT_VIEW_ROTATED)
+ offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
+ else
+ offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
+
+ iowrite32(val, &map[offset / sizeof(*map)]);
+ }
+ }
+
+ i915_vma_unpin_iomap(vma);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->gtt_view.type != I915_GTT_VIEW_NORMAL);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0 ; y < plane_info[0].height; y++) {
+ for (x = 0 ; x < plane_info[0].width; x++) {
+ unsigned int offset, src_idx;
+ u32 exp = y << 16 | x;
+ u32 val;
+
+ if (*t == I915_GTT_VIEW_ROTATED)
+ src_idx = rotated_index(&view.rotated, 0, x, y);
+ else
+ src_idx = remapped_index(&view.remapped, 0, x, y);
+ offset = src_idx * PAGE_SIZE;
+
+ val = ioread32(&map[offset / sizeof(*map)]);
+ if (val != exp) {
+ pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
+ *t == I915_GTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ exp, val);
+ i915_vma_unpin_iomap(vma);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ i915_vma_unpin_iomap(vma);
+
+ cond_resched();
+ }
+ }
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_vma_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_remapped_gtt),
+ };
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.c b/drivers/gpu/drm/i915/selftests/igt_atomic.c
new file mode 100644
index 000000000..fb506b699
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+#include "igt_atomic.h"
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
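+/*
+ * The atomic contexts under which a test may be run: with preemption
+ * disabled, with softirqs disabled, and with hardirqs disabled.
+ */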
+const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
new file mode 100644
index 000000000..1991798ab
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_ATOMIC_H
+#define IGT_ATOMIC_H
+
+struct igt_atomic_section {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+};
+
+extern const struct igt_atomic_section igt_atomic_phases[];
+
+#endif /* IGT_ATOMIC_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
new file mode 100644
index 000000000..b484e12df
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+
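+/*
+ * Quiesce the GPU between tests: wait up to 3s for idle; if it fails to
+ * settle, wedge the device and report -EIO to cancel further testing.
+ */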
+int igt_flush_test(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt = to_gt(i915);
+ int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
+
+ cond_resched();
+
+ if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
+
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
new file mode 100644
index 000000000..7541fa74e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_FLUSH_TEST_H
+#define IGT_FLUSH_TEST_H
+
+struct drm_i915_private;
+
+int igt_flush_test(struct drm_i915_private *i915);
+
+#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
new file mode 100644
index 000000000..72b58b666
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_live_test.h"
+
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ struct intel_gt *gt = to_gt(i915);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, gt, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
+
+ return 0;
+}
+
+int igt_live_test_end(struct igt_live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (igt_flush_test(i915))
+ return -EIO;
+
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, to_gt(i915), id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
new file mode 100644
index 000000000..36ed42736
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_LIVE_TEST_H
+#define IGT_LIVE_TEST_H
+
+#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
+
+struct drm_i915_private;
+
+struct igt_live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
+};
+
+/*
+ * Flush the GPU state before and after the test to ensure that no residual
+ * code is running on the GPU that may affect this test. Also compare the
+ * state before and after the test and alert if it unexpectedly changes,
+ * e.g. if the GPU was reset.
+ */
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name);
+int igt_live_test_end(struct igt_live_test *t);
+
+#endif /* IGT_LIVE_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
new file mode 100644
index 000000000..e920a461b
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "igt_mmap.h"
+
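+/*
+ * Map an object's mmap offset as userspace would: look up the vma offset
+ * node, open a mock DRM file and vm_mmap() through it. Returns the user
+ * address on success or a negative error value.
+ */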
+unsigned long igt_mmap_offset(struct drm_i915_private *i915,
+ u64 offset,
+ unsigned long size,
+ unsigned long prot,
+ unsigned long flags)
+{
+ struct drm_vma_offset_node *node;
+ struct file *file;
+ unsigned long addr;
+ int err;
+
+ /* no need to refcount, we own this object */
+ drm_vma_offset_lock_lookup(i915->drm.vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(i915->drm.vma_offset_manager,
+ offset / PAGE_SIZE, size / PAGE_SIZE);
+ drm_vma_offset_unlock_lookup(i915->drm.vma_offset_manager);
+
+ if (GEM_WARN_ON(!node)) {
+ pr_info("Failed to lookup %llx\n", offset);
+ return -ENOENT;
+ }
+
+ /* Pretend to open("/dev/dri/card0") */
+ file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = drm_vma_node_allow(node, file->private_data);
+ if (err) {
+ addr = err;
+ goto out_file;
+ }
+
+ addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
+ prot, flags, drm_vma_node_offset_addr(node));
+
+ drm_vma_node_revoke(node, file->private_data);
+out_file:
+ fput(file);
+ return addr;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
new file mode 100644
index 000000000..acbe34d81
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_MMAP_H
+#define IGT_MMAP_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_vma_offset_node;
+
+unsigned long igt_mmap_offset(struct drm_i915_private *i915,
+ u64 offset,
+ unsigned long size,
+ unsigned long prot,
+ unsigned long flags);
+
+#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000..a2838c65f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
+
+#include "../i915_drv.h"
+
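+/*
+ * Serialise with concurrent resets: claim the global I915_RESET_BACKOFF
+ * bit and every per-engine reset bit, sleeping until each is free.
+ */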
+void igt_global_reset_lock(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ for_each_engine(engine, gt, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+ wake_up_all(&gt->reset.queue);
+}
+
+bool igt_force_reset(struct intel_gt *gt)
+{
+ intel_gt_set_wedged(gt);
+ intel_gt_reset(gt, 0, NULL);
+
+ return !intel_gt_is_wedged(gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000..851873b67
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void igt_global_reset_lock(struct intel_gt *gt);
+void igt_global_reset_unlock(struct intel_gt *gt);
+bool igt_force_reset(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000..0c22594ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,277 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+
+#include "gem/i915_gem_internal.h"
+#include "gem/selftests/igt_gem_utils.h"
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
+{
+ int err;
+
+ memset(spin, 0, sizeof(*spin));
+ spin->gt = gt;
+
+ spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
+
+ spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ return 0;
+
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static void *igt_spinner_pin_obj(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *obj,
+ unsigned int mode, struct i915_vma **vma)
+{
+ void *vaddr;
+ int ret;
+
+ *vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(*vma))
+ return ERR_CAST(*vma);
+
+ ret = i915_gem_object_lock(obj, ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vaddr = i915_gem_object_pin_map(obj, mode);
+
+ if (!ww)
+ i915_gem_object_unlock(obj);
+
+ if (IS_ERR(vaddr))
+ return vaddr;
+
+ if (ww)
+ ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
+ else
+ ret = i915_vma_pin(*vma, 0, 0, PIN_USER);
+
+ if (ret) {
+ i915_gem_object_unpin_map(obj);
+ return ERR_PTR(ret);
+ }
+
+ return vaddr;
+}
+
+int igt_spinner_pin(struct igt_spinner *spin,
+ struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ void *vaddr;
+
+ if (spin->ce && WARN_ON(spin->ce != ce))
+ return -ENODEV;
+ spin->ce = ce;
+
+ if (!spin->seqno) {
+ vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+ }
+
+ if (!spin->batch) {
+ unsigned int mode;
+
+ mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
+ vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ spin->batch = vaddr;
+ }
+
+ return 0;
+}
+
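+/*
+ * Each spinner writes its breadcrumb into a per-context u32 slot of the
+ * HWS page, indexed (modulo the page) by the fence context number.
+ */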
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
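+/*
+ * Track @vma in @rq: await prior work on the object and mark the vma
+ * active (as a write if EXEC_OBJECT_WRITE is set).
+ */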
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct intel_context *ce,
+ u32 arbitration_command)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq = NULL;
+ struct i915_vma *hws, *vma;
+ unsigned int flags;
+ u32 *batch;
+ int err;
+
+ GEM_BUG_ON(spin->gt != ce->vm->gt);
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ return ERR_PTR(-ENODEV);
+
+ if (!spin->batch) {
+ err = igt_spinner_pin(spin, ce, NULL);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ hws = spin->hws_vma;
+ vma = spin->batch_vma;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
+
+ err = move_to_active(vma, rq, 0);
+ if (err)
+ goto cancel_rq;
+
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
+
+ batch = spin->batch;
+
+ if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = hws_address(hws, rq);
+ }
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ else if (IS_HASWELL(rq->engine->i915))
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+ else if (GRAPHICS_VER(rq->engine->i915) >= 6)
+ *batch++ = MI_BATCH_BUFFER_START;
+ else
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ intel_gt_chipset_flush(engine->gt);
+
+ if (engine->emit_init_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
+ flags = 0;
+ if (GRAPHICS_VER(rq->engine->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+
+cancel_rq:
+ if (err) {
+ i915_request_set_error_once(rq, err);
+ i915_request_add(rq);
+ }
+ return err ? ERR_PTR(err) : rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ if (!spin->batch)
+ return;
+
+ *spin->batch = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(spin->gt);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ if (spin->batch) {
+ i915_vma_unpin(spin->batch_vma);
+ i915_gem_object_unpin_map(spin->obj);
+ }
+ i915_gem_object_put(spin->obj);
+
+ if (spin->seqno) {
+ i915_vma_unpin(spin->hws_vma);
+ i915_gem_object_unpin_map(spin->hws);
+ }
+ i915_gem_object_put(spin->hws);
+}
+
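+/*
+ * Wait for the spinner to start executing: busy-wait up to 100us, then
+ * sleep-wait up to 50ms, for the breadcrumb the batch writes just before
+ * it begins spinning.
+ */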
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 100) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 50));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000..fbe5b1625
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "gem/i915_gem_context.h"
+#include "gt/intel_engine.h"
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_selftest.h"
+
+struct intel_gt;
+
+struct igt_spinner {
+ struct intel_gt *gt;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_vma *hws_vma, *batch_vma;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
+int igt_spinner_pin(struct igt_spinner *spin,
+ struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct intel_context *ce,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
new file mode 100644
index 000000000..3b18e5905
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -0,0 +1,1411 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/sort.h>
+
+#include <drm/drm_buddy.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_migrate.h"
+#include "i915_memcpy.h"
+#include "i915_ttm_buddy_manager.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void close_objects(struct intel_memory_region *mem,
+ struct list_head *objects)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ i915_gem_object_lock(obj, NULL);
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+		/* Don't pollute the memory region between tests */
+ __i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+
+ cond_resched();
+
+ i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = PAGE_SIZE;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("%s failed, space still left in region\n",
+ __func__);
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(mem, &objects);
+
+ return err;
+}
+
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+ struct list_head *objects,
+ u64 size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mem, size, 0, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto put;
+
+ list_add(&obj->st_link, objects);
+ return obj;
+
+put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_lock(obj, NULL);
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+}
+
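+/* The object is contiguous if every sg entry starts where the previous one ended. */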
+static bool is_contiguous(struct drm_i915_gem_object *obj)
+{
+ struct scatterlist *sg;
+ dma_addr_t addr = -1;
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (addr != -1 && sg_dma_address(sg) != addr)
+ return false;
+
+ addr = sg_dma_address(sg) + sg_dma_len(sg);
+ }
+
+ return true;
+}
+
+static int igt_mock_reserve(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ resource_size_t avail = resource_size(&mem->region);
+ struct drm_i915_gem_object *obj;
+ const u32 chunk_size = SZ_32M;
+ u32 i, offset, count, *order;
+ u64 allocated, cur_avail;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ int err = 0;
+
+ count = avail / chunk_size;
+ order = i915_random_order(count, &prng);
+ if (!order)
+ return 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_free_order;
+ }
+
+ /* Reserve a bunch of ranges within the region */
+ for (i = 0; i < count; ++i) {
+ u64 start = order[i] * chunk_size;
+ u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+ /* Allow for some really big holes */
+ if (!size)
+ continue;
+
+ size = round_up(size, PAGE_SIZE);
+ offset = igt_random_offset(&prng, 0, chunk_size, size,
+ PAGE_SIZE);
+
+ err = intel_memory_region_reserve(mem, start + offset, size);
+ if (err) {
+			pr_err("%s failed to reserve range\n", __func__);
+ goto out_close;
+ }
+
+ /* XXX: maybe sanity check the block range here? */
+ avail -= size;
+ }
+
+ /* Try to see if we can allocate from the remaining space */
+ allocated = 0;
+ cur_avail = avail;
+ do {
+ u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+ size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENXIO)
+ break;
+
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+ cur_avail -= size;
+ allocated += size;
+ } while (1);
+
+ if (allocated != avail) {
+		pr_err("%s mismatch between allocation and free space\n", __func__);
+ err = -EINVAL;
+ }
+
+out_close:
+ close_objects(mem, &objects);
+ intel_memory_region_destroy(mem);
+out_free_order:
+ kfree(order);
+ return err;
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ LIST_HEAD(holes);
+ I915_RND_STATE(prng);
+ resource_size_t total;
+ resource_size_t min;
+ u64 target;
+ int err = 0;
+
+ total = resource_size(&mem->region);
+
+ /* Min size */
+ obj = igt_object_create(mem, &objects, PAGE_SIZE,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (!is_contiguous(obj)) {
+ pr_err("%s min object spans disjoint sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Max size */
+ obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (!is_contiguous(obj)) {
+ pr_err("%s max object spans disjoint sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Internal fragmentation should not bleed into the object size */
+ target = i915_prandom_u64_state(&prng);
+ div64_u64_rem(target, total, &target);
+ target = round_up(target, PAGE_SIZE);
+ target = max_t(u64, PAGE_SIZE, target);
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != target) {
+ pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
+ obj->base.size, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ if (!is_contiguous(obj)) {
+ pr_err("%s object spans disjoint sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Try to fragment the address space, such that half of it is free, but
+ * the max contiguous block size is SZ_64K.
+ */
+
+ target = SZ_64K;
+ n_objects = div64_u64(total, target);
+
+ while (n_objects--) {
+ struct list_head *list;
+
+ if (n_objects % 2)
+ list = &holes;
+ else
+ list = &objects;
+
+ obj = igt_object_create(mem, list, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+ }
+
+ close_objects(mem, &holes);
+
+ min = target;
+ target = total >> 1;
+
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Even though we have enough free space, we don't have a big enough
+ * contiguous block. Make sure that holds true.
+ */
+
+ do {
+ bool should_fail = target > min;
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (should_fail != IS_ERR(obj)) {
+ pr_err("%s target allocation(%llx) mismatch\n",
+ __func__, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ target >>= 1;
+ } while (target >= PAGE_SIZE);
+
+err_close_objects:
+ list_splice_tail(&holes, &objects);
+ close_objects(mem, &objects);
+ return err;
+}
+
+static int igt_mock_splintered_region(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ttm_buddy_resource *res;
+ struct drm_i915_gem_object *obj;
+ struct drm_buddy *mm;
+ unsigned int expected_order;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+	/*
+	 * Sanity check that we can still allocate everything even if
+	 * mm.max_order doesn't correspond to mm.size, i.e. our starting
+	 * address space size is not a power of two.
+	 */
+
+ size = (SZ_4G - 1) & PAGE_MASK;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ res = to_ttm_buddy_resource(obj->mm.res);
+ mm = res->mm;
+ if (mm->size != size) {
+ pr_err("%s size mismatch(%llu != %llu)\n",
+ __func__, mm->size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ expected_order = get_order(rounddown_pow_of_two(size));
+ if (mm->max_order != expected_order) {
+ pr_err("%s order mismatch(%u != %u)\n",
+ __func__, mm->max_order, expected_order);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ close_objects(mem, &objects);
+
+ /*
+	/*
+	 * While we should be able to allocate everything without any flag
+	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
+	 * actually limited to the largest power-of-two for the region size,
+	 * i.e. max_order, due to the inner workings of the buddy allocator.
+	 * So make sure that does indeed hold true.
+	 */
+
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (!IS_ERR(obj)) {
+ pr_err("%s too large contiguous allocation was not rejected\n",
+ __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ pr_err("%s largest possible contiguous allocation failed\n",
+ __func__);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_destroy(mem);
+ return err;
+}
+
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ttm_buddy_resource *res;
+ struct drm_i915_gem_object *obj;
+ struct drm_buddy_block *block;
+ struct drm_buddy *mm;
+ struct list_head *blocks;
+ struct scatterlist *sg;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ unsigned int max_segment;
+ unsigned int ps;
+ u64 size;
+ int err = 0;
+
+	/*
+	 * While we may create very large contiguous blocks, we may need
+	 * to break those down for consumption elsewhere. In particular,
+	 * dma-mapping with scatterlist elements has an implicit limit of
+	 * UINT_MAX on each element.
+	 */
+
+ size = SZ_8G;
+ ps = PAGE_SIZE;
+ if (i915_prandom_u64_state(&prng) & 1)
+ ps = SZ_64K; /* For something like DG2 */
+
+ max_segment = round_down(UINT_MAX, ps);
+
+ mem = mock_region_create(i915, 0, size, ps, 0, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ res = to_ttm_buddy_resource(obj->mm.res);
+ blocks = &res->blocks;
+ mm = res->mm;
+ size = 0;
+ list_for_each_entry(block, blocks, link) {
+ if (drm_buddy_block_size(mm, block) > size)
+ size = drm_buddy_block_size(mm, block);
+ }
+ if (size < max_segment) {
+		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %llu\n",
+		       __func__, max_segment, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ dma_addr_t daddr = sg_dma_address(sg);
+
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ if (!IS_ALIGNED(daddr, ps)) {
+ pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
+ __func__, &daddr, ps);
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_destroy(mem);
+ return err;
+}
+
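+/*
+ * Sum the part of the object's blocks that lies below the region's
+ * io_size, i.e. within the CPU-mappable (small BAR) window.
+ */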
+static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mr = obj->mm.region;
+ struct i915_ttm_buddy_resource *bman_res =
+ to_ttm_buddy_resource(obj->mm.res);
+ struct drm_buddy *mm = bman_res->mm;
+ struct drm_buddy_block *block;
+ u64 total;
+
+ total = 0;
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ u64 start = drm_buddy_block_offset(block);
+ u64 end = start + drm_buddy_block_size(mm, block);
+
+ if (start < mr->io_size)
+ total += min_t(u64, end, mr->io_size) - start;
+ }
+
+ return total;
+}
+
+static int igt_mock_io_size(void *arg)
+{
+ struct intel_memory_region *mr = arg;
+ struct drm_i915_private *i915 = mr->i915;
+ struct drm_i915_gem_object *obj;
+ u64 mappable_theft_total;
+ u64 io_size;
+ u64 total;
+ u64 ps;
+ u64 rem;
+ u64 size;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ int err = 0;
+
+ ps = SZ_4K;
+ if (i915_prandom_u64_state(&prng) & 1)
+ ps = SZ_64K; /* For something like DG2 */
+
+ div64_u64_rem(i915_prandom_u64_state(&prng), SZ_8G, &total);
+ total = round_down(total, ps);
+ total = max_t(u64, total, SZ_1G);
+
+ div64_u64_rem(i915_prandom_u64_state(&prng), total - ps, &io_size);
+ io_size = round_down(io_size, ps);
+ io_size = max_t(u64, io_size, SZ_256M); /* 256M seems to be the common lower limit */
+
+ pr_info("%s with ps=%llx, io_size=%llx, total=%llx\n",
+ __func__, ps, io_size, total);
+
+ mr = mock_region_create(i915, 0, total, ps, 0, io_size);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto out_err;
+ }
+
+ mappable_theft_total = 0;
+ rem = total - io_size;
+ do {
+ div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
+ size = round_down(size, ps);
+ size = max(size, ps);
+
+ obj = igt_object_create(mr, &objects, size,
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj)) {
+ pr_err("%s TOPDOWN failed with rem=%llx, size=%llx\n",
+ __func__, rem, size);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ mappable_theft_total += igt_object_mappable_total(obj);
+ rem -= size;
+ } while (rem);
+
+ pr_info("%s mappable theft=(%lluMiB/%lluMiB), total=%lluMiB\n",
+ __func__,
+ (u64)mappable_theft_total >> 20,
+ (u64)io_size >> 20,
+ (u64)total >> 20);
+
+ /*
+ * Even if we allocate all of the non-mappable portion, we should still
+ * be able to dip into the mappable portion.
+ */
+ obj = igt_object_create(mr, &objects, io_size,
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj)) {
+ pr_err("%s allocation unexpectedly failed\n", __func__);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ close_objects(mr, &objects);
+
+ rem = io_size;
+ do {
+ div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
+ size = round_down(size, ps);
+ size = max(size, ps);
+
+ obj = igt_object_create(mr, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ pr_err("%s MAPPABLE failed with rem=%llx, size=%llx\n",
+ __func__, rem, size);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ if (igt_object_mappable_total(obj) != size) {
+ pr_err("%s allocation is not mappable(size=%llx)\n",
+ __func__, size);
+ err = -EINVAL;
+ goto out_close;
+ }
+ rem -= size;
+ } while (rem);
+
+ /*
+ * We assume CPU access is required by default, which should result in a
+ * failure here, even though the non-mappable portion is free.
+ */
+ obj = igt_object_create(mr, &objects, ps, 0);
+ if (!IS_ERR(obj)) {
+ pr_err("%s allocation unexpectedly succeeded\n", __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+out_close:
+ close_objects(mr, &objects);
+ intel_memory_region_destroy(mr);
+out_err:
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n = obj->base.size >> PAGE_SHIFT;
+ u32 *ptr;
+ int err;
+
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
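+	/* Check the chosen dword in each page; the GPU wrote one dword per page */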
+ ptr += dword;
+ while (n--) {
+ if (*ptr != val) {
+ pr_err("base[%u]=%08x, val=%08x\n",
+ dword, *ptr, val);
+ err = -EINVAL;
+ break;
+ }
+
+ ptr += PAGE_SIZE / sizeof(*ptr);
+ }
+
+ i915_gem_object_unpin_map(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
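+	/* Pick an engine at random for each write/readback cycle */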
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ i915_gem_object_lock(obj, NULL);
+ err = igt_cpu_check(obj, dword, rng);
+ i915_gem_object_unlock(obj);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_lmem_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto out_put;
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_lmem_create_with_ps(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ int err = 0;
+ u32 ps;
+
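+	/* Try every power-of-two placement size from one page up to 1G */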
+ for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
+ struct drm_i915_gem_object *obj;
+ dma_addr_t daddr;
+
+ obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENXIO || err == -E2BIG) {
+ pr_info("%s not enough lmem for ps(%u) err=%d\n",
+ __func__, ps, err);
+ err = 0;
+ }
+
+ break;
+ }
+
+		if (obj->base.size != ps) {
+			pr_err("%s size(%zu) != ps(%u)\n",
+			       __func__, obj->base.size, ps);
+			err = -EINVAL;
+			/* Not yet locked, so out_put (which unlocks) is unsafe */
+			i915_gem_object_put(obj);
+			break;
+		}
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
+ pr_info("%s not enough lmem for ps(%u) err=%d\n",
+ __func__, ps, err);
+ err = 0;
+ }
+ goto out_put;
+ }
+
+ daddr = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(daddr, ps)) {
+ pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
+ __func__, &daddr, ps);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+out_put:
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int igt_lmem_create_cleared_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 size, i;
+ int err;
+
+ i915_gem_drain_freed_objects(i915);
+
+ size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
+ size = round_up(size, PAGE_SIZE);
+ i = 0;
+
+ do {
+ struct drm_i915_gem_object *obj;
+ unsigned int flags;
+ u32 dword, val;
+ void *vaddr;
+
+		/*
+		 * Alternate between cleared and uncleared allocations, dirtying
+		 * the pages each time. Since we are the only user, we should
+		 * see some, if not all, of the underlying pages reused, which
+		 * lets us verify that they really are cleared whenever we
+		 * request it.
+		 */
+
+ flags = I915_BO_ALLOC_CPU_CLEAR;
+ if (i & 1)
+ flags = 0;
+
+ obj = i915_gem_object_create_lmem(i915, size, flags);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
+ &prng);
+
+ if (flags & I915_BO_ALLOC_CPU_CLEAR) {
+ err = igt_cpu_check(obj, dword, 0);
+ if (err) {
+ pr_err("%s failed with size=%u, flags=%u\n",
+ __func__, size, flags);
+ goto out_unpin;
+ }
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_unpin;
+ }
+
+ val = prandom_u32_state(&prng);
+
+ memset32(vaddr, val, obj->base.size / sizeof(u32));
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+out_put:
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ if (err)
+ break;
+ ++i;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s completed (%u) iterations\n", __func__, i);
+
+ return err;
+}
+
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ fput(file);
+ return err;
+}
+
+static struct intel_engine_cs *
+random_engine_class(struct drm_i915_private *i915,
+ unsigned int class,
+ struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for (engine = intel_engine_lookup_user(i915, class, 0);
+ engine && engine->uabi_class == class;
+ engine = rb_entry_safe(rb_next(&engine->uabi_node),
+ typeof(*engine), uabi_node))
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ return intel_engine_lookup_user(i915, class, count);
+}
+
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+ 64, /* cl */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ struct intel_engine_cs *engine;
+ struct i915_request *rq;
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
+ if (!engine)
+ return 0;
+
+ pr_info("%s: using %s\n", __func__, engine->name);
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ i915_gem_object_lock(obj, NULL);
+
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ intel_engine_pm_get(engine);
+ err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
+ obj->mm.pages->sgl, I915_CACHE_NONE,
+ true, 0xdeadbeaf, &rq);
+ if (rq) {
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
+ i915_request_put(rq);
+ }
+
+ intel_engine_pm_put(engine);
+ if (!err)
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
+ bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
+ GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static const char *repr_type(u32 type)
+{
+ switch (type) {
+ case I915_MAP_WB:
+ return "WB";
+ case I915_MAP_WC:
+ return "WC";
+ }
+
+ return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+ void **out_addr)
+{
+ struct drm_i915_gem_object *obj;
+ void *addr;
+
+ obj = i915_gem_object_create_region(mr, size, 0, 0);
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
+ return ERR_PTR(-ENODEV);
+ return obj;
+ }
+
+ addr = i915_gem_object_pin_map_unlocked(obj, type);
+ if (IS_ERR(addr)) {
+ i915_gem_object_put(obj);
+ if (PTR_ERR(addr) == -ENXIO)
+ return ERR_PTR(-ENODEV);
+ return addr;
+ }
+
+ *out_addr = addr;
+ return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+ unsigned long *tmp = dst;
+ const unsigned long *s = src;
+
+ size = size / sizeof(unsigned long);
+ while (size--)
+ *tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+ i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+ struct intel_memory_region *dst_mr,
+ u64 size, u32 src_type, u32 dst_type)
+{
+ struct drm_i915_private *i915 = src_mr->i915;
+ const struct {
+ const char *name;
+ void (*copy)(void *dst, const void *src, size_t size);
+ bool skip;
+ } tests[] = {
+ {
+ "memcpy",
+ igt_memcpy,
+ },
+ {
+ "memcpy_long",
+ igt_memcpy_long,
+ },
+ {
+ "memcpy_from_wc",
+ igt_memcpy_from_wc,
+ !i915_has_memcpy_from_wc(),
+ },
+ };
+ struct drm_i915_gem_object *src, *dst;
+ void *src_addr, *dst_addr;
+ int ret = 0;
+ int i;
+
+ src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto out;
+ }
+
+ dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out_unpin_src;
+ }
+
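+	/*
+	 * Time each copy routine over five passes; after sorting, throughput
+	 * is reported from a weighted mean of the middle three samples
+	 * (t[1] + 2*t[2] + t[3] over 4 copies), dropping the two outliers.
+	 */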
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ ktime_t t[5];
+ int pass;
+
+ if (tests[i].skip)
+ continue;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ tests[i].copy(dst_addr, src_addr, size);
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ if (t[0] <= 0) {
+ /* ignore the impossible to protect our sanity */
+ pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ t[0], t[4]);
+ continue;
+ }
+
+ pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+ __func__,
+ src_mr->name, repr_type(src_type),
+ dst_mr->name, repr_type(dst_type),
+ tests[i].name, size >> 10,
+ div64_u64(mul_u32_u32(4 * size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+
+ cond_resched();
+ }
+
+ i915_gem_object_unpin_map(dst);
+ i915_gem_object_put(dst);
+out_unpin_src:
+ i915_gem_object_unpin_map(src);
+ i915_gem_object_put(src);
+
+ i915_gem_drain_freed_objects(i915);
+out:
+ if (ret == -ENODEV)
+ ret = 0;
+
+ return ret;
+}
+
+static int perf_memcpy(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const u32 types[] = {
+ I915_MAP_WB,
+ I915_MAP_WC,
+ };
+ static const u32 sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_4M,
+ };
+ struct intel_memory_region *src_mr, *dst_mr;
+ int src_id, dst_id;
+ int i, j, k;
+ int ret;
+
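+	/* Sweep every src/dst region pairing across all sizes and map types */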
+ for_each_memory_region(src_mr, i915, src_id) {
+ for_each_memory_region(dst_mr, i915, dst_id) {
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ for (j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (k = 0; k < ARRAY_SIZE(types); ++k) {
+ ret = _perf_memcpy(src_mr,
+ dst_mr,
+ sizes[i],
+ types[j],
+ types[k]);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_reserve),
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_contiguous),
+ SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
+ SUBTEST(igt_mock_io_size),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ err = i915_subtests(tests, mem);
+
+ intel_memory_region_destroy(mem);
+out_unref:
+ mock_destroy_device(i915);
+ return err;
+}
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_create_with_ps),
+ SUBTEST(igt_lmem_create_cleared_cpu),
+ SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
+ };
+
+ if (!HAS_LMEM(i915)) {
+ pr_info("device lacks LMEM support, skipping\n");
+ return 0;
+ }
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_memcpy),
+ };
+
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
new file mode 100644
index 000000000..2990dd4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+//#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "selftests/intel_scheduler_helpers.h"
+
+#define REDUCED_TIMESLICE 5
+#define REDUCED_PREEMPT 10
+#define WAIT_FOR_RESET_TIME 10000
+
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ return engine;
+
+ pr_err("No valid engine found!\n");
+ return NULL;
+}
+
+int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved,
+ enum selftest_scheduler_modify modify_type)
+{
+ int err;
+
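+	/* Snapshot the current policy so the caller can restore it later */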
+ saved->reset = engine->i915->params.reset;
+ saved->flags = engine->flags;
+ saved->timeslice = engine->props.timeslice_duration_ms;
+ saved->preempt_timeout = engine->props.preempt_timeout_ms;
+
+ switch (modify_type) {
+ case SELFTEST_SCHEDULER_MODIFY_FAST_RESET:
+ /*
+ * Enable force pre-emption on time slice expiration
+ * together with engine reset on pre-emption timeout.
+ * This is required to make the GuC notice and reset
+ * the single hanging context.
+ * Also, reduce the preemption timeout to something
+ * small to speed the test up.
+ */
+ engine->i915->params.reset = 2;
+ engine->flags |= I915_ENGINE_WANT_FORCED_PREEMPTION;
+ engine->props.timeslice_duration_ms = REDUCED_TIMESLICE;
+ engine->props.preempt_timeout_ms = REDUCED_PREEMPT;
+ break;
+
+ case SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK:
+ engine->props.preempt_timeout_ms = 0;
+ break;
+
+ default:
+ pr_err("Invalid scheduler policy modification type: %d!\n", modify_type);
+ return -EINVAL;
+ }
+
+ if (!intel_engine_uses_guc(engine))
+ return 0;
+
+ err = intel_guc_global_policies_update(&engine->gt->uc.guc);
+ if (err)
+ intel_selftest_restore_policy(engine, saved);
+
+ return err;
+}
+
+int intel_selftest_restore_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved)
+{
+ /* Restore the original policies */
+ engine->i915->params.reset = saved->reset;
+ engine->flags = saved->flags;
+ engine->props.timeslice_duration_ms = saved->timeslice;
+ engine->props.preempt_timeout_ms = saved->preempt_timeout;
+
+ if (!intel_engine_uses_guc(engine))
+ return 0;
+
+ return intel_guc_global_policies_update(&engine->gt->uc.guc);
+}
+
+int intel_selftest_wait_for_rq(struct i915_request *rq)
+{
+ long ret;
+
+ ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
new file mode 100644
index 000000000..ae60bb507
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SELFTEST_SCHEDULER_HELPERS_H_
+#define _INTEL_SELFTEST_SCHEDULER_HELPERS_H_
+
+#include <linux/types.h>
+
+struct i915_request;
+struct intel_engine_cs;
+struct intel_gt;
+
+struct intel_selftest_saved_policy {
+ u32 flags;
+ u32 reset;
+ u64 timeslice;
+ u64 preempt_timeout;
+};
+
+enum selftest_scheduler_modify {
+ SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK = 0,
+ SELFTEST_SCHEDULER_MODIFY_FAST_RESET,
+};
+
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt);
+int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved,
+ enum selftest_scheduler_modify modify_type);
+int intel_selftest_restore_policy(struct intel_engine_cs *engine,
+ struct intel_selftest_saved_policy *saved);
+int intel_selftest_wait_for_rq(struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
new file mode 100644
index 000000000..fda9bb79c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
+ unsigned int num_ranges,
+ bool is_watertight)
+{
+ unsigned int i;
+ s32 prev;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ /* Check that the table is watertight */
+ if (is_watertight && (prev + 1) != (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the table never goes backwards */
+ if (prev >= (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the entry is valid */
+ if (ranges->start >= ranges->end) {
+ pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
+ __func__, i, ranges->start, ranges->end);
+ return -EINVAL;
+ }
+
+ prev = ranges->end;
+ }
+
+ return 0;
+}
+
+static int intel_shadow_table_check(void)
+{
+ struct {
+ const struct i915_range *regs;
+ unsigned int size;
+ } range_lists[] = {
+ { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
+ { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
+ { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
+ { pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
+ };
+ const struct i915_range *range;
+ unsigned int i, j;
+ s32 prev;
+
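+	/* Each shadow table must be sorted, non-overlapping and dword-aligned */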
+ for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
+ range = range_lists[j].regs;
+ for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
+ if (range->end < range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
+ __func__, i, range->start, range->end);
+ return -EINVAL;
+ }
+
+ if (prev >= (s32)range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
+ __func__, i, range->start, range->end, prev);
+ return -EINVAL;
+ }
+
+ if (range->start % 4) {
+ pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
+ __func__, i, range->start, range->end);
+ return -EINVAL;
+ }
+
+ prev = range->end;
+ }
+ }
+
+ return 0;
+}
+
+int intel_uncore_mock_selftests(void)
+{
+ struct {
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ bool is_watertight;
+ } fw[] = {
+ { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
+ { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
+ { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
+ { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
+ { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
+ { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
+ { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
+ };
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(fw); i++) {
+ err = intel_fw_table_check(fw[i].ranges,
+ fw[i].num_ranges,
+ fw[i].is_watertight);
+ if (err)
+ return err;
+ }
+
+ err = intel_shadow_table_check();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_forcewake_ops(void *arg)
+{
+ static const struct reg {
+ const char *name;
+ u8 min_graphics_ver;
+ u8 max_graphics_ver;
+ unsigned int offset;
+ } registers[] = {
+ {
+ "RING_START",
+ 6, 7,
+ 0x38,
+ },
+ {
+ "RING_MI_MODE",
+ 8, U8_MAX,
+ 0x9c,
+		},
+		{}
+	};
+ const struct reg *r;
+ struct intel_gt *gt = arg;
+ struct intel_uncore_forcewake_domain *domain;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ int err = 0;
+
+ GEM_BUG_ON(gt->awake);
+
+ /* vlv/chv with their pcu behave differently wrt reads */
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
+ pr_debug("PCU fakes forcewake badly; skipping\n");
+ return 0;
+ }
+
+ /*
+ * Not quite as reliable across the gen as one would hope.
+ *
+ * Either our theory of operation is incorrect, or there remain
+ * external parties interfering with the powerwells.
+ *
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110210
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ /* We have to pick carefully to get the exact behaviour we need */
+ for (r = registers; r->name; r++)
+ if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
+ break;
+ if (!r->name) {
+ pr_debug("Forcewaked register not known for %s; skipping\n",
+ intel_platform_name(INTEL_INFO(gt->i915)->platform));
+ return 0;
+ }
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ for_each_fw_domain(domain, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (!hrtimer_cancel(&domain->timer))
+ continue;
+
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+
+ for_each_engine(engine, gt, id) {
+ i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ if (!engine->default_state)
+ continue;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
+ FW_REG_READ);
+ if (!fw_domains)
+ continue;
+
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ if (!domain->wake_count)
+ continue;
+
+ pr_err("fw_domain %s still active, aborting test!\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ intel_uncore_forcewake_get(uncore, fw_domains);
+ val = readl(reg);
+ intel_uncore_forcewake_put(uncore, fw_domains);
+
+ /* Flush the forcewake release (delayed onto a timer) */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+
+ preempt_disable();
+ err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+ preempt_enable();
+ if (err) {
+ pr_err("Failed to clear fw_domain %s\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ goto out_rpm;
+ }
+ }
+
+ if (!val) {
+ pr_err("%s:%s was zero while fw was held!\n",
+ engine->name, r->name);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ /* We then expect the read to return 0 outside of the fw */
+ if (wait_for(readl(reg) == 0, 100)) {
+ pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+ engine->name, r->name, readl(reg), fw_domains);
+ err = -ETIMEDOUT;
+ goto out_rpm;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+ return err;
+}
+
+static int live_forcewake_domains(void *arg)
+{
+#define FW_RANGE 0x40000
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
+ unsigned long *valid;
+ u32 offset;
+ int err;
+
+ if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
+ !IS_VALLEYVIEW(gt->i915) &&
+ !IS_CHERRYVIEW(gt->i915))
+ return 0;
+
+ /*
+	 * This test may lock up the machine or cause GPU hangs afterwards.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
+ if (!valid)
+ return -ENOMEM;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
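+	/*
+	 * First pass: with all forcewake held, record which offsets can be
+	 * read without tripping the unclaimed-mmio detection.
+	 */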
+ check_for_unclaimed_mmio(uncore);
+ for (offset = 0; offset < FW_RANGE; offset += 4) {
+ i915_reg_t reg = { offset };
+
+ intel_uncore_posting_read_fw(uncore, reg);
+ if (!check_for_unclaimed_mmio(uncore))
+ set_bit(offset, valid);
+ }
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
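+	/*
+	 * Second pass: with forcewake dropped and reset, every offset that
+	 * was previously readable must still not raise an unclaimed access.
+	 */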
+ err = 0;
+ for_each_set_bit(offset, valid, FW_RANGE) {
+ i915_reg_t reg = { offset };
+
+ iosf_mbi_punit_acquire();
+ intel_uncore_forcewake_reset(uncore);
+ iosf_mbi_punit_release();
+
+ check_for_unclaimed_mmio(uncore);
+
+ intel_uncore_posting_read_fw(uncore, reg);
+ if (check_for_unclaimed_mmio(uncore)) {
+ pr_err("Unclaimed mmio read to register 0x%04x\n",
+ offset);
+ err = -EINVAL;
+ }
+ }
+
+ bitmap_free(valid);
+ return err;
+}
+
+static int live_fw_table(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ /* Confirm the table we load is still valid */
+ return intel_fw_table_check(gt->uncore->fw_domains_table,
+ gt->uncore->fw_domains_table_entries,
+ GRAPHICS_VER(gt->i915) >= 9);
+}
+
+int intel_uncore_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_fw_table),
+ SUBTEST(live_forcewake_ops),
+ SUBTEST(live_forcewake_domains),
+ };
+
+ return intel_gt_live_subtests(tests, to_gt(i915));
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
new file mode 100644
index 000000000..bf2752cc1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "lib_sw_fence.h"
+
+/* Small library of different fence types useful for writing tests */
+
+static int
+nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key)
+{
+ debug_fence_init_onstack(fence);
+
+ __init_waitqueue_head(&fence->wait, name, key);
+ atomic_set(&fence->pending, 1);
+ fence->error = 0;
+ fence->fn = nop_fence_notify;
+}
+
+void onstack_fence_fini(struct i915_sw_fence *fence)
+{
+ if (!fence->fn)
+ return;
+
+ i915_sw_fence_commit(fence);
+ i915_sw_fence_fini(fence);
+}
+
+static void timed_fence_wake(struct timer_list *t)
+{
+ struct timed_fence *tf = from_timer(tf, t, timer);
+
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires)
+{
+ onstack_fence_init(&tf->fence);
+
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
+
+ if (time_after(expires, jiffies))
+ mod_timer(&tf->timer, expires);
+ else
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_fini(struct timed_fence *tf)
+{
+ if (del_timer_sync(&tf->timer))
+ i915_sw_fence_commit(&tf->fence);
+
+ destroy_timer_on_stack(&tf->timer);
+ i915_sw_fence_fini(&tf->fence);
+}
+
+struct heap_fence {
+ struct i915_sw_fence fence;
+ union {
+ struct kref ref;
+ struct rcu_head rcu;
+ };
+};
+
+static int
+heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ heap_fence_put(&h->fence);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct i915_sw_fence *heap_fence_create(gfp_t gfp)
+{
+ struct heap_fence *h;
+
+ h = kmalloc(sizeof(*h), gfp);
+ if (!h)
+ return NULL;
+
+ i915_sw_fence_init(&h->fence, heap_fence_notify);
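+	/* Two refs: one for the caller, one dropped by the FENCE_FREE notify */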
+ refcount_set(&h->ref.refcount, 2);
+
+ return &h->fence;
+}
+
+static void heap_fence_release(struct kref *ref)
+{
+ struct heap_fence *h = container_of(ref, typeof(*h), ref);
+
+ i915_sw_fence_fini(&h->fence);
+
+ kfree_rcu(h, rcu);
+}
+
+void heap_fence_put(struct i915_sw_fence *fence)
+{
+ struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+ kref_put(&h->ref, heap_fence_release);
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
new file mode 100644
index 000000000..e54d6bc23
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * lib_sw_fence.h - library routines for testing N:M synchronisation points
+ *
+ * Copyright (C) 2017 Intel Corporation
+ */
+
+#ifndef _LIB_SW_FENCE_H_
+#define _LIB_SW_FENCE_H_
+
+#include <linux/timer.h>
+
+#include "../i915_sw_fence.h"
+
+#ifdef CONFIG_LOCKDEP
+#define onstack_fence_init(fence) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __onstack_fence_init((fence), #fence, &__key); \
+} while (0)
+#else
+#define onstack_fence_init(fence) \
+ __onstack_fence_init((fence), NULL, NULL)
+#endif
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key);
+void onstack_fence_fini(struct i915_sw_fence *fence);
+
+struct timed_fence {
+ struct i915_sw_fence fence;
+ struct timer_list timer;
+};
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires);
+void timed_fence_fini(struct timed_fence *tf);
+
+struct i915_sw_fence *heap_fence_create(gfp_t gfp);
+void heap_fence_put(struct i915_sw_fence *fence);
+
+#endif /* _LIB_SW_FENCE_H_ */
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
new file mode 100644
index 000000000..eb03b5b28
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <asm/msr.h>
+
+#include "i915_drv.h"
+#include "librapl.h"
+
+bool librapl_supported(const struct drm_i915_private *i915)
+{
+ /* Discrete cards require hwmon integration */
+ if (IS_DGFX(i915))
+ return false;
+
+ return librapl_energy_uJ();
+}
+
+u64 librapl_energy_uJ(void)
+{
+ unsigned long long power;
+ u32 units;
+
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ return 0;
+
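+	/* Bits 12:8 of MSR_RAPL_POWER_UNIT encode the energy unit as 1/2^n J */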
+ units = (power & 0x1f00) >> 8;
+
+ if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ return 0;
+
+ return (1000000 * power) >> units; /* convert to uJ */
+}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
new file mode 100644
index 000000000..e3b24fad0
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/librapl.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_LIBRAPL_H
+#define SELFTEST_LIBRAPL_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+bool librapl_supported(const struct drm_i915_private *i915);
+
+u64 librapl_energy_uJ(void);
+
+#endif /* SELFTEST_LIBRAPL_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
new file mode 100644
index 000000000..9916b6f95
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_DRM_H
+#define __MOCK_DRM_H
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+
+struct drm_file;
+struct file;
+
+static inline struct file *mock_file(struct drm_i915_private *i915)
+{
+ return mock_drm_getfile(i915->drm.primary, O_RDWR);
+}
+
+static inline struct drm_file *to_drm_file(struct file *f)
+{
+ return f->private_data;
+}
+
+#endif /* !__MOCK_DRM_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
new file mode 100644
index 000000000..fff11c90f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/iommu.h>
+
+#include <drm/drm_managed.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "mock_request.h"
+#include "mock_gem_device.h"
+#include "mock_gtt.h"
+#include "mock_uncore.h"
+#include "mock_region.h"
+
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/mock_gem_object.h"
+
+void mock_device_flush(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt = to_gt(i915);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ do {
+ for_each_engine(engine, gt, id)
+ mock_engine_flush(engine);
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
+ NULL));
+}
+
+static void mock_device_release(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ if (!i915->do_release)
+ goto out;
+
+ mock_device_flush(i915);
+ intel_gt_driver_remove(to_gt(i915));
+
+ i915_gem_drain_workqueue(i915);
+ i915_gem_drain_freed_objects(i915);
+
+ mock_fini_ggtt(to_gt(i915)->ggtt);
+ destroy_workqueue(i915->wq);
+
+ intel_region_ttm_device_fini(i915);
+ intel_gt_driver_late_release_all(i915);
+ intel_memory_regions_driver_release(i915);
+
+ drm_mode_config_cleanup(&i915->drm);
+
+out:
+ i915_params_free(&i915->params);
+}
+
+static const struct drm_driver mock_driver = {
+ .name = "mock",
+ .driver_features = DRIVER_GEM,
+ .release = mock_device_release,
+};
+
+static void release_dev(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ kfree(pdev);
+}
+
+static int pm_domain_resume(struct device *dev)
+{
+ return pm_generic_runtime_resume(dev);
+}
+
+static int pm_domain_suspend(struct device *dev)
+{
+ return pm_generic_runtime_suspend(dev);
+}
+
+static struct dev_pm_domain pm_domain = {
+ .ops = {
+ .runtime_suspend = pm_domain_suspend,
+ .runtime_resume = pm_domain_resume,
+ },
+};
+
+static void mock_gt_probe(struct drm_i915_private *i915)
+{
+ i915->gt[0] = &i915->gt0;
+ i915->gt[0]->name = "Mock GT";
+}
+
+struct drm_i915_private *mock_gem_device(void)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
+#endif
+ struct drm_i915_private *i915;
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return NULL;
+ device_initialize(&pdev->dev);
+ pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
+ pdev->dev.release = release_dev;
+ dev_set_name(&pdev->dev, "mock");
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ /* HACK to disable iommu for the fake device; force identity mapping */
+ pdev->dev.iommu = &fake_iommu;
+#endif
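+	/* Group all devres allocations so mock_destroy_device() can release them */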
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ put_device(&pdev->dev);
+ return NULL;
+ }
+
+ i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915)) {
+ pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
+ devres_release_group(&pdev->dev, NULL);
+ put_device(&pdev->dev);
+
+ return NULL;
+ }
+
+ pci_set_drvdata(pdev, i915);
+
+ dev_pm_domain_set(&pdev->dev, &pm_domain);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (pm_runtime_enabled(&pdev->dev))
+ WARN_ON(pm_runtime_get_sync(&pdev->dev));
+
+ i915_params_copy(&i915->params, &i915_modparams);
+
+ intel_runtime_pm_init_early(&i915->runtime_pm);
+ /* wakeref tracking has significant overhead */
+ i915->runtime_pm.no_wakeref_tracking = true;
+
+ /* Using the global GTT may ask questions about KMS users, so prepare */
+ drm_mode_config_init(&i915->drm);
+
+ RUNTIME_INFO(i915)->graphics.ip.ver = -1;
+
+ RUNTIME_INFO(i915)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M;
+
+ RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
+ intel_memory_regions_hw_probe(i915);
+
+ spin_lock_init(&i915->gpu_error.lock);
+
+ i915_gem_init__mm(i915);
+ intel_root_gt_init_early(i915);
+ mock_uncore_init(&i915->uncore, i915);
+ atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
+ to_gt(i915)->awake = -ENODEV;
+ mock_gt_probe(i915);
+
+ ret = intel_region_ttm_device_init(i915);
+ if (ret)
+ goto err_ttm;
+
+ i915->wq = alloc_ordered_workqueue("mock", 0);
+ if (!i915->wq)
+ goto err_drv;
+
+ mock_init_contexts(i915);
+
+ /* allocate the ggtt */
+ ret = intel_gt_assign_ggtt(to_gt(i915));
+ if (ret)
+ goto err_unlock;
+
+ mock_init_ggtt(to_gt(i915));
+ to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
+
+ RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
+ to_gt(i915)->info.engine_mask = BIT(0);
+
+ to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!to_gt(i915)->engine[RCS0])
+ goto err_unlock;
+
+ if (mock_engine_init(to_gt(i915)->engine[RCS0]))
+ goto err_context;
+
+ __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
+ intel_engines_driver_register(i915);
+
+ i915->do_release = true;
+ ida_init(&i915->selftest.mock_region_instances);
+
+ return i915;
+
+err_context:
+ intel_gt_driver_remove(to_gt(i915));
+err_unlock:
+ destroy_workqueue(i915->wq);
+err_drv:
+ intel_region_ttm_device_fini(i915);
+err_ttm:
+ intel_gt_driver_late_release_all(i915);
+ intel_memory_regions_driver_release(i915);
+ drm_mode_config_cleanup(&i915->drm);
+ mock_destroy_device(i915);
+
+ return NULL;
+}
+
+void mock_destroy_device(struct drm_i915_private *i915)
+{
+ struct device *dev = i915->drm.dev;
+
+ devres_release_group(dev, NULL);
+ put_device(dev);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
new file mode 100644
index 000000000..953cfe4fa
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MOCK_GEM_DEVICE_H__
+#define __MOCK_GEM_DEVICE_H__
+
+struct drm_i915_private;
+
+struct drm_i915_private *mock_gem_device(void);
+void mock_device_flush(struct drm_i915_private *i915);
+
+void mock_destroy_device(struct drm_i915_private *i915);
+
+#endif /* !__MOCK_GEM_DEVICE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
new file mode 100644
index 000000000..568840e7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_gtt.h"
+
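+/*
+ * The address space ops below are deliberate no-ops: the mock device has no
+ * real page tables to program.
+ */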
+static void mock_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+}
+
+static void mock_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level, u32 flags)
+{
+}
+
+static void mock_bind_ppgtt(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
+ vma_res->bound_flags |= flags;
+}
+
+static void mock_unbind_ppgtt(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+}
+
+static void mock_cleanup(struct i915_address_space *vm)
+{
+}
+
+static void mock_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
+{
+ struct i915_ppgtt *ppgtt;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return NULL;
+
+ ppgtt->vm.gt = to_gt(i915);
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->vm.dma = i915->drm.dev;
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ppgtt->vm.clear_range = mock_clear_range;
+ ppgtt->vm.insert_page = mock_insert_page;
+ ppgtt->vm.insert_entries = mock_insert_entries;
+ ppgtt->vm.cleanup = mock_cleanup;
+
+ ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+ ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+
+ return ppgtt;
+}
+
+static void mock_bind_ggtt(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+}
+
+static void mock_unbind_ggtt(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+}
+
+void mock_init_ggtt(struct intel_gt *gt)
+{
+ struct i915_ggtt *ggtt = gt->ggtt;
+
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = gt->i915;
+ ggtt->vm.is_ggtt = true;
+
+ ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ ggtt->vm.total = 4096 * PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = mock_clear_range;
+ ggtt->vm.insert_page = mock_insert_page;
+ ggtt->vm.insert_entries = mock_insert_entries;
+ ggtt->vm.cleanup = mock_cleanup;
+
+ ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+ ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+}
+
+void mock_fini_ggtt(struct i915_ggtt *ggtt)
+{
+ i915_address_space_fini(&ggtt->vm);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
new file mode 100644
index 000000000..d6eb90bd7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_GTT_H
+#define __MOCK_GTT_H
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_gt;
+
+void mock_init_ggtt(struct intel_gt *gt);
+void mock_fini_ggtt(struct i915_ggtt *ggtt);
+
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
+
+#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
new file mode 100644
index 000000000..bac21fe84
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019-2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_placement.h>
+#include <linux/scatterlist.h>
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "mock_region.h"
+
+static void mock_region_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_refct_sgt_put(obj->mm.rsgt);
+ obj->mm.rsgt = NULL;
+ intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
+}
+
+static int mock_region_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+ int err;
+
+ obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+ obj->bo_offset,
+ obj->base.size,
+ obj->flags);
+ if (IS_ERR(obj->mm.res))
+ return PTR_ERR(obj->mm.res);
+
+ obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
+ obj->mm.res,
+ obj->mm.region->min_page_size);
+ if (IS_ERR(obj->mm.rsgt)) {
+ err = PTR_ERR(obj->mm.rsgt);
+ goto err_free_resource;
+ }
+
+ pages = &obj->mm.rsgt->table;
+ __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+ return 0;
+
+err_free_resource:
+ intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
+ return err;
+}
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .name = "mock-region",
+ .get_pages = mock_region_get_pages,
+ .put_pages = mock_region_put_pages,
+ .release = i915_gem_object_release_memory_region,
+};
+
+static int mock_object_init(struct intel_memory_region *mem,
+ struct drm_i915_gem_object *obj,
+ resource_size_t offset,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+
+ if (size > resource_size(&mem->region))
+ return -E2BIG;
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+
+ obj->bo_offset = offset;
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem);
+
+ return 0;
+}
+
+static int mock_region_fini(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ int instance = mem->instance;
+ int ret;
+
+ ret = intel_region_ttm_fini(mem);
+ ida_free(&i915->selftest.mock_region_instances, instance);
+
+ return ret;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = intel_region_ttm_init,
+ .release = mock_region_fini,
+ .init_object = mock_object_init,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ resource_size_t io_size)
+{
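+	/* Each mock region occupies a private TTM memory type, so ids are scarce */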
+ int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
+ TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
+ GFP_KERNEL);
+
+ if (instance < 0)
+ return ERR_PTR(instance);
+
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, io_size,
+ INTEL_MEMORY_MOCK, instance,
+ &mock_region_ops);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
new file mode 100644
index 000000000..e36c3a433
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ resource_size_t io_size);
+
+#endif /* !__MOCK_REGION_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
new file mode 100644
index 000000000..09f747228
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gt/mock_engine.h"
+
+#include "mock_request.h"
+
+struct i915_request *
+mock_request(struct intel_context *ce, unsigned long delay)
+{
+ struct i915_request *request;
+
+ /* NB the i915->requests slab cache is enlarged to fit mock_request */
+ request = intel_context_create_request(ce);
+ if (IS_ERR(request))
+ return NULL;
+
+ request->mock.delay = delay;
+ return request;
+}
+
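+/*
+ * Drop the request from the mock engine's fake hw queue under the
+ * engine lock; if it was still queued, unsubmit it as well. Returns
+ * true iff the request had not yet been "executed" by the mock hw.
+ */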
+bool mock_cancel_request(struct i915_request *request)
+{
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+ bool was_queued;
+
+ spin_lock_irq(&engine->hw_lock);
+ was_queued = !list_empty(&request->mock.link);
+ list_del_init(&request->mock.link);
+ spin_unlock_irq(&engine->hw_lock);
+
+ if (was_queued)
+ i915_request_unsubmit(request);
+
+ return was_queued;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
new file mode 100644
index 000000000..8907b60c2
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_REQUEST__
+#define __MOCK_REQUEST__
+
+#include <linux/list.h>
+
+#include "../i915_request.h"
+
+struct i915_request *
+mock_request(struct intel_context *ce, unsigned long delay);
+
+bool mock_cancel_request(struct i915_request *request);
+
+#endif /* !__MOCK_REQUEST__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
new file mode 100644
index 000000000..f2d6be5e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_uncore.h"
+
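+/*
+ * Generate no-op raw mmio accessors: writes are discarded and reads
+ * return 0, so uncore users can run against a device with no hardware
+ * behind the registers.
+ */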
+#define __nop_write(x) \
+static void \
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
+__nop_write(8)
+__nop_write(16)
+__nop_write(32)
+
+#define __nop_read(x) \
+static u##x \
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
+__nop_read(8)
+__nop_read(16)
+__nop_read(32)
+__nop_read(64)
+
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
+{
+ intel_uncore_init_early(uncore, to_gt(i915));
+
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
new file mode 100644
index 000000000..7acf1ef4d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_UNCORE_H
+#define __MOCK_UNCORE_H
+
+struct drm_i915_private;
+struct intel_uncore;
+
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
+
+#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
new file mode 100644
index 000000000..d599186d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/random.h>
+
+#include "i915_selftest.h"
+#include "i915_utils.h"
+
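+/* Arbitrary offset so the synthetic pfns stay well clear of pfn 0 */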
+#define PFN_BIAS (1 << 10)
+
+struct pfn_table {
+ struct sg_table st;
+ unsigned long start, end;
+};
+
+typedef unsigned int (*npages_fn_t)(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd);
+
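+/*
+ * Walk the table with for_each_sg() and check that every chunk starts
+ * at the expected pfn and spans exactly the number of pages the
+ * generator handed to alloc_table().
+ */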
+static noinline int expect_pfn_sg(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ struct scatterlist *sg;
+ unsigned long pfn, n;
+
+ pfn = pt->start;
+ for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
+ struct page *page = sg_page(sg);
+ unsigned int npages = npages_fn(n, pt->st.nents, rnd);
+
+ if (page_to_pfn(page) != pfn) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (sg->length != npages * PAGE_SIZE) {
+ pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
+ __func__, who, npages * PAGE_SIZE, sg->length);
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn += npages;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sg_page_iter sgiter;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
+ struct page *page = sg_page_iter_page(&sgiter);
+
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sgt_iter sgt;
+ struct page *page;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sgt_page(page, sgt, &pt->st) {
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int expect_pfn_sgtable(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ int err;
+
+ err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sg_page_iter(pt, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sgtiter(pt, who, timeout);
+ if (err)
+ return err;
+
+ return 0;
+}
+
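+/*
+ * Chunk-size generators: a single page per chunk, growing chunks,
+ * shrinking chunks, uniformly random chunks (1-1024 pages) and random
+ * huge-page-sized chunks (4K/64K/2M).
+ */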
+static unsigned int one(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1;
+}
+
+static unsigned int grow(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return n + 1;
+}
+
+static unsigned int shrink(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return count - n;
+}
+
+static unsigned int random(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1 + (prandom_u32_state(rnd) % 1024);
+}
+
+static unsigned int random_page_size_pages(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ /* 4K, 64K, 2M */
+ static unsigned int page_count[] = {
+ BIT(12) >> PAGE_SHIFT,
+ BIT(16) >> PAGE_SHIFT,
+ BIT(21) >> PAGE_SHIFT,
+ };
+
+	return page_count[prandom_u32_state(rnd) % ARRAY_SIZE(page_count)];
+}
+
+static inline bool page_contiguous(struct page *first,
+ struct page *last,
+ unsigned long npages)
+{
+ return first + npages == last;
+}
+
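+/*
+ * Build a table of @count chunks sized by @npages_fn. A failure of
+ * sg_alloc_table() itself is reported as @alloc_error (callers flip it
+ * from -ENOMEM to a tolerated -ENOSPC once chained tables have been
+ * exercised); -ENOSPC is also returned when the memmap is too sparse
+ * to fake a contiguous run of pages.
+ */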
+static int alloc_table(struct pfn_table *pt,
+ unsigned long count, unsigned long max,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ int alloc_error)
+{
+ struct scatterlist *sg;
+ unsigned long n, pfn;
+
+ if (sg_alloc_table(&pt->st, max,
+ GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
+ return alloc_error;
+
+	/* count * PAGE_SIZE must fit in sg->length (an unsigned int), i.e. count < 2^20 with 4K pages */
+ GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));
+
+ /* Construct a table where each scatterlist contains different number
+ * of entries. The idea is to check that we can iterate the individual
+ * pages from inside the coalesced lists.
+ */
+ pt->start = PFN_BIAS;
+ pfn = pt->start;
+ sg = pt->st.sgl;
+ for (n = 0; n < count; n++) {
+ unsigned long npages = npages_fn(n, count, rnd);
+
+ /* Nobody expects the Sparse Memmap! */
+ if (!page_contiguous(pfn_to_page(pfn),
+ pfn_to_page(pfn + npages),
+ npages)) {
+ sg_free_table(&pt->st);
+ return -ENOSPC;
+ }
+
+ if (n)
+ sg = sg_next(sg);
+ sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
+
+ GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
+ GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
+ GEM_BUG_ON(sg->offset != 0);
+
+ pfn += npages;
+ }
+ sg_mark_end(sg);
+ pt->st.nents = n;
+ pt->end = pfn;
+
+ return 0;
+}
+
+static const npages_fn_t npages_funcs[] = {
+ one,
+ grow,
+ shrink,
+ random,
+ random_page_size_pages,
+ NULL,
+};
+
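+/*
+ * For chunk counts of 2^p - 1, 2^p and 2^p + 1 (p prime, p < 20),
+ * build and verify a table with every generator above, tolerating oom
+ * only once a single sg allocation has been exceeded.
+ */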
+static int igt_sg_alloc(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max_order = 20; /* approximating a 4GiB object */
+ struct rnd_state prng;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max_order) {
+ unsigned long size = BIT(prime);
+ int offset;
+
+ for (offset = -1; offset <= 1; offset++) {
+ unsigned long sz = size + offset;
+ const npages_fn_t *npages;
+ struct pfn_table pt;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = alloc_table(&pt, sz, sz, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt, *npages, &prng,
+ "sg_alloc_table",
+ end_time);
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (size > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
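+/*
+ * Allocate an oversized table (PAGE_SIZE entries), populate only a
+ * prime number of them, and check that i915_sg_trim() shrinks
+ * orig_nents to match while keeping the page walk intact.
+ */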
+static int igt_sg_trim(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max = PAGE_SIZE; /* not prime! */
+ struct pfn_table pt;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max) {
+ const npages_fn_t *npages;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ struct rnd_state prng;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ err = alloc_table(&pt, prime, max, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ if (i915_sg_trim(&pt.st)) {
+ if (pt.st.orig_nents != prime ||
+ pt.st.nents != prime) {
+ pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
+ pt.st.nents, pt.st.orig_nents, prime);
+ err = -EINVAL;
+ } else {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt,
+ *npages, &prng,
+ "i915_sg_trim",
+ end_time);
+ }
+ }
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (prime > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
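+/* Entry point wired into the mock selftest list (i915_mock_selftests.h). */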
+int scatterlist_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_sg_alloc),
+ SUBTEST(igt_sg_trim),
+ };
+
+ return i915_subtests(tests, NULL);
+}