Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 306
1 file changed, 269 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index a2024247b..32cd0c978 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -13,11 +13,14 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
+#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <generated/xe_wa_oob.h>
+
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
@@ -34,7 +37,6 @@
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
-#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
@@ -480,17 +482,53 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
return 0;
}
+/**
+ * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
+ * @vm: The vm for which we are rebinding.
+ * @exec: The struct drm_exec with the locked GEM objects.
+ * @num_fences: The number of fences to reserve for the operation, not
+ * including rebinds and validations.
+ *
+ * Validates all evicted gem objects and rebinds their vmas. Note that
+ * rebindings may cause evictions and hence the validation-rebind
+ * sequence is rerun until there are no more objects to validate.
+ *
+ * Return: 0 on success, negative error code on error. In particular,
+ * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
+ * the drm_exec transaction needs to be restarted.
+ */
+int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ do {
+ ret = drm_gpuvm_validate(&vm->gpuvm, exec);
+ if (ret)
+ return ret;
+
+ ret = xe_vm_rebind(vm, false);
+ if (ret)
+ return ret;
+ } while (!list_empty(&vm->gpuvm.evict.list));
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
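
For orientation, here is a minimal sketch of how a caller might drive xe_vm_validate_rebind() from a drm_exec locking loop. The wrapper below is illustrative only and not part of this patch; the caller is assumed to have called drm_exec_init() and to call drm_exec_fini() afterwards.

	/*
	 * Illustrative sketch (not from this patch): validate and rebind from
	 * inside a drm_exec loop. -EDEADLK from the helper restarts the loop
	 * via drm_exec_retry_on_contention().
	 */
	static int example_validate_locked(struct xe_vm *vm, struct drm_exec *exec,
					   unsigned int num_fences)
	{
		int err = 0;

		drm_exec_until_all_locked(exec) {
			err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
			drm_exec_retry_on_contention(exec);
			if (err)
				break;

			/* May evict and relock; retried on contention. */
			err = xe_vm_validate_rebind(vm, exec, num_fences);
			drm_exec_retry_on_contention(exec);
			if (err)
				break;
		}

		return err;
	}
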
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
bool *done)
{
int err;
- /*
- * 1 fence for each preempt fence plus a fence for each tile from a
- * possible rebind
- */
- err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
- vm->xe->info.tile_count);
+ err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -505,7 +543,7 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
return 0;
}
- err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
+ err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
if (err)
return err;
@@ -513,7 +551,13 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
if (err)
return err;
- return drm_gpuvm_validate(&vm->gpuvm, exec);
+ /*
+ * Add validation and rebinding to the locking loop since both can
+ * cause evictions which may require blocking dma_resv locks.
+ * The fence reservation here is intended for the new preempt fences
+ * we attach at the end of the rebind work.
+ */
+ return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
}
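
The slots reserved here are consumed later, when the rebind work attaches the new preempt fences to the locked objects. A hedged sketch of that consumption; the helper below is illustrative and not the driver's actual resume path, but every call used in it exists as written.

	/*
	 * Illustrative sketch (not from this patch): adding a preempt fence to
	 * each object locked by the drm_exec transaction consumes the slots
	 * reserved through xe_vm_validate_rebind() above.
	 */
	static void example_add_preempt_fence(struct drm_exec *exec,
					      struct dma_fence *fence)
	{
		struct drm_gem_object *obj;
		unsigned long index;

		drm_exec_for_each_locked_object(exec, index, obj)
			dma_resv_add_fence(obj->resv, fence,
					   DMA_RESV_USAGE_BOOKKEEP);
	}
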
static void preempt_rebind_work_func(struct work_struct *w)
@@ -801,6 +845,7 @@ static void xe_vma_free(struct xe_vma *vma)
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -813,6 +858,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
u8 id;
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+ bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -847,6 +893,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
+ if (dumpable)
+ vma->gpuva.flags |= XE_VMA_DUMPABLE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -990,35 +1038,26 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
}
/**
- * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * xe_vm_lock_vma() - drm_exec utility to lock a vma
* @exec: The drm_exec object we're currently locking for.
* @vma: The vma for which we want to lock the vm resv and any attached
* object's resv.
- * @num_shared: The number of dma-fence slots to pre-allocate in the
- * objects' reservation objects.
*
* Return: 0 on success, negative error code on error. In particular
* may return -EDEADLK on WW transaction contention and -EINTR if
* an interruptible wait is terminated by a signal.
*/
-int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
- unsigned int num_shared)
+int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_bo *bo = xe_vma_bo(vma);
int err;
XE_WARN_ON(!vm);
- if (num_shared)
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- else
- err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
- if (!err && bo && !bo->vm) {
- if (num_shared)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
- else
- err = drm_exec_lock_obj(exec, &bo->ttm.base);
- }
+
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm)
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
return err;
}
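
Because xe_vm_lock_vma() only locks and no longer reserves dma-fence slots, a caller that intends to attach fences now reserves them separately after locking. A minimal sketch under that assumption; the helper and the explicit dma_resv_reserve_fences() placement are illustrative, not taken from this patch.

	/*
	 * Illustrative sketch (not from this patch): lock the vma's resvs and
	 * then explicitly reserve the fence slots the caller intends to use.
	 */
	static int example_lock_and_reserve(struct drm_exec *exec,
					    struct xe_vma *vma,
					    unsigned int num_fences)
	{
		struct xe_bo *bo = xe_vma_bo(vma);
		int err;

		err = xe_vm_lock_vma(exec, vma);
		if (err)
			return err;

		err = dma_resv_reserve_fences(xe_vm_resv(xe_vma_vm(vma)),
					      num_fences);
		if (!err && bo && !bo->vm)
			err = dma_resv_reserve_fences(bo->ttm.base.resv,
						      num_fences);

		return err;
	}
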
@@ -1030,7 +1069,7 @@ static void xe_vma_destroy_unlocked(struct xe_vma *vma)
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
- err = xe_vm_prepare_vma(&exec, vma, 0);
+ err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
@@ -1065,7 +1104,9 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
XE_WARN_ON(err); /* Shouldn't be possible */
return err;
@@ -1076,7 +1117,9 @@ static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock);
+ mutex_lock(&vm->snap_mutex);
drm_gpuva_remove(&vma->gpuva);
+ mutex_unlock(&vm->snap_mutex);
if (vm->usm.last_fault_vma == vma)
vm->usm.last_fault_vma = NULL;
}
@@ -1095,7 +1138,7 @@ static struct drm_gpuva_op *xe_vm_op_alloc(void)
static void xe_vm_free(struct drm_gpuvm *gpuvm);
-static struct drm_gpuvm_ops gpuvm_ops = {
+static const struct drm_gpuvm_ops gpuvm_ops = {
.op_alloc = xe_vm_op_alloc,
.vm_bo_validate = xe_gpuvm_validate,
.vm_free = xe_vm_free,
@@ -1303,6 +1346,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->flags = flags;
init_rwsem(&vm->lock);
+ mutex_init(&vm->snap_mutex);
INIT_LIST_HEAD(&vm->rebind_list);
@@ -1428,6 +1472,7 @@ err_close:
return ERR_PTR(err);
err_no_resv:
+ mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
kfree(vm);
@@ -1532,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1547,21 +1602,17 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
- xe_device_mem_access_put(xe);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
+ mutex_destroy(&vm->snap_mutex);
+
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
+ xe_device_mem_access_put(xe);
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
@@ -2163,6 +2214,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
@@ -2328,6 +2380,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
struct list_head *ops_list, bool last)
{
+ struct xe_device *xe = vm->xe;
struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
int err = 0;
@@ -2356,6 +2409,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
{
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->map.dumpable ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2380,6 +2435,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
@@ -2401,6 +2459,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
xe_vma_end(vma) -
xe_vma_start(old);
op->remap.start = xe_vma_end(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
@@ -2411,6 +2472,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
flags |= op->base.remap.unmap->va->flags &
DRM_GPUVA_SPARSE ?
VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
@@ -2431,6 +2495,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
op->remap.range -=
xe_vma_end(old) -
xe_vma_start(vma);
+ vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.start,
+ (ULL)op->remap.range);
}
}
break;
@@ -2473,7 +2540,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
lockdep_assert_held_write(&vm->lock);
- err = xe_vm_prepare_vma(exec, vma, 1);
+ err = xe_vm_lock_vma(exec, vma);
if (err)
return err;
@@ -3284,3 +3351,168 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
return 0;
}
+
+struct xe_vm_snapshot {
+ unsigned long num_snaps;
+ struct {
+ u64 ofs, bo_ofs;
+ unsigned long len;
+ struct xe_bo *bo;
+ void *data;
+ struct mm_struct *mm;
+ } snap[];
+};
+
+struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
+{
+ unsigned long num_snaps = 0, i;
+ struct xe_vm_snapshot *snap = NULL;
+ struct drm_gpuva *gpuva;
+
+ if (!vm)
+ return NULL;
+
+ mutex_lock(&vm->snap_mutex);
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ if (gpuva->flags & XE_VMA_DUMPABLE)
+ num_snaps++;
+ }
+
+ if (num_snaps)
+ snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
+ if (!snap)
+ goto out_unlock;
+
+ snap->num_snaps = num_snaps;
+ i = 0;
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ struct xe_bo *bo = vma->gpuva.gem.obj ?
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+ if (!(gpuva->flags & XE_VMA_DUMPABLE))
+ continue;
+
+ snap->snap[i].ofs = xe_vma_start(vma);
+ snap->snap[i].len = xe_vma_size(vma);
+ if (bo) {
+ snap->snap[i].bo = xe_bo_get(bo);
+ snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
+ } else if (xe_vma_is_userptr(vma)) {
+ struct mm_struct *mm =
+ to_userptr_vma(vma)->userptr.notifier.mm;
+
+ if (mmget_not_zero(mm))
+ snap->snap[i].mm = mm;
+ else
+ snap->snap[i].data = ERR_PTR(-EFAULT);
+
+ snap->snap[i].bo_ofs = xe_vma_userptr(vma);
+ } else {
+ snap->snap[i].data = ERR_PTR(-ENOENT);
+ }
+ i++;
+ }
+
+out_unlock:
+ mutex_unlock(&vm->snap_mutex);
+ return snap;
+}
+
+void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
+{
+ for (int i = 0; i < snap->num_snaps; i++) {
+ struct xe_bo *bo = snap->snap[i].bo;
+ struct iosys_map src;
+ int err;
+
+ if (IS_ERR(snap->snap[i].data))
+ continue;
+
+ snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
+ if (!snap->snap[i].data) {
+ snap->snap[i].data = ERR_PTR(-ENOMEM);
+ goto cleanup_bo;
+ }
+
+ if (bo) {
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+ err = ttm_bo_vmap(&bo->ttm, &src);
+ if (!err) {
+ xe_map_memcpy_from(xe_bo_device(bo),
+ snap->snap[i].data,
+ &src, snap->snap[i].bo_ofs,
+ snap->snap[i].len);
+ ttm_bo_vunmap(&bo->ttm, &src);
+ }
+ dma_resv_unlock(bo->ttm.base.resv);
+ } else {
+ void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
+
+ kthread_use_mm(snap->snap[i].mm);
+ if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
+ err = 0;
+ else
+ err = -EFAULT;
+ kthread_unuse_mm(snap->snap[i].mm);
+
+ mmput(snap->snap[i].mm);
+ snap->snap[i].mm = NULL;
+ }
+
+ if (err) {
+ kvfree(snap->snap[i].data);
+ snap->snap[i].data = ERR_PTR(err);
+ }
+
+cleanup_bo:
+ xe_bo_put(bo);
+ snap->snap[i].bo = NULL;
+ }
+}
+
+void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < snap->num_snaps; i++) {
+ if (IS_ERR(snap->snap[i].data))
+ goto uncaptured;
+
+ drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
+ drm_printf(p, "[%llx].data: ",
+ snap->snap[i].ofs);
+
+ for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
+ u32 *val = snap->snap[i].data + j;
+ char dumped[ASCII85_BUFSZ];
+
+ drm_puts(p, ascii85_encode(*val, dumped));
+ }
+
+ drm_puts(p, "\n");
+ continue;
+
+uncaptured:
+ drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
+ snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
+ PTR_ERR(snap->snap[i].data));
+ }
+}
+
+void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
+{
+ unsigned long i;
+
+ if (!snap)
+ return;
+
+ for (i = 0; i < snap->num_snaps; i++) {
+ if (!IS_ERR(snap->snap[i].data))
+ kvfree(snap->snap[i].data);
+ xe_bo_put(snap->snap[i].bo);
+ if (snap->snap[i].mm)
+ mmput(snap->snap[i].mm);
+ }
+ kvfree(snap);
+}
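
Taken together, the snapshot helpers added above split capture into a cheap, non-blocking stage (under snap_mutex, GFP_NOWAIT, no content copies) and a delayed stage that performs the actual copies from sleepable context. A hedged sketch of the intended lifecycle follows; the devcoredump-style caller is an assumption, not part of this diff.

	/*
	 * Illustrative sketch (not from this patch): two-stage VM snapshot use.
	 */
	static void example_vm_snapshot(struct xe_vm *vm, struct drm_printer *p)
	{
		struct xe_vm_snapshot *snap;

		/* Stage 1: cheap capture; may run from the reset/crash path. */
		snap = xe_vm_snapshot_capture(vm);
		if (!snap)
			return;

		/* Stage 2: copy BO and userptr contents in sleepable context. */
		xe_vm_snapshot_capture_delayed(snap);

		/* Emit ascii85-encoded contents, then release everything. */
		xe_vm_snapshot_print(snap, p);
		xe_vm_snapshot_free(snap);
	}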