Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d31..555022c06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
@@ -253,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
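The two fields added here hold the cookies returned by the tracked GT power-management calls used later in this patch: intel_gt_pm_get() returns an intel_wakeref_t, and the matching intel_gt_pm_put() must be handed the same cookie back. A minimal sketch of the pattern, assuming only the signatures visible in this diff (the holder struct and helper names are illustrative):

/* Illustrative only: stash the cookie from the get, pass it to the put. */
struct example_holder {
	intel_wakeref_t wakeref;	/* cookie from intel_gt_pm_get() */
};

static void example_acquire(struct example_holder *h, struct intel_gt *gt)
{
	h->wakeref = intel_gt_pm_get(gt);	/* keeps the GT powered up */
}

static void example_release(struct example_holder *h, struct intel_gt *gt)
{
	intel_gt_pm_put(gt, h->wakeref);	/* paired via the cookie */
}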
@@ -1156,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP)
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
else
io_mapping_unmap_atomic((void __iomem *)vaddr);
}
@@ -1172,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
if (cache->vaddr & KMAP) {
struct page *page = i915_gem_object_get_page(obj, cache->page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) |
(unsigned long)vaddr;
} else {
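This hunk and the surrounding ones convert the relocation cache from kmap_atomic()/kunmap_atomic() to kmap_local_page()/kunmap_local(). A local mapping is still per-thread and only valid in the context that created it, but unlike the atomic variants it leaves preemption and page faults enabled, so the code holding the mapping no longer has to behave as an atomic section. A generic sketch of the conversion (standard highmem API, not i915-specific):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy out of a (possibly highmem) page through a thread-local mapping. */
static void copy_from_page_local(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_local_page(page);	/* was: kmap_atomic(page) */

	memcpy(dst, vaddr, len);
	kunmap_local(vaddr);			/* was: kunmap_atomic(vaddr) */
}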
@@ -1202,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
if (cache->vaddr & CLFLUSH_AFTER)
mb();
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
i915_gem_object_finish_access(obj);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1234,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct page *page;
if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
+ kunmap_local(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
@@ -1256,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
if (!obj->mm.dirty)
set_page_dirty(page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = pageno;
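The reloc_kmap() hunks finish the same conversion. One constraint carries over from the stack-based implementation: if a thread holds several local mappings at once, they must be released in the reverse order they were taken. A generic sketch (same assumed includes as the previous sketch):

/* Nested local mappings are stack-ordered: unmap the most recent first. */
static void copy_highmem_page(struct page *dst_page, struct page *src_page)
{
	void *dst = kmap_local_page(dst_page);
	void *src = kmap_local_page(src_page);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_local(src);	/* taken last, released first */
	kunmap_local(dst);
}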
@@ -1678,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
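kvmalloc_array(n, size, flags) takes the element count first and the element size second; the old call had the two transposed. Because one argument is the constant 1, the resulting allocation is identical either way, so this is an API-usage cleanup rather than a behavior change. The overflow checking the helper exists for only comes into play when the real count and element size are passed separately, roughly (generic sketch):

#include <linux/slab.h>	/* kvmalloc_array(); lives in <linux/mm.h> on older kernels */

/* Overflow-checked array allocation: count first, element size second. */
static void *alloc_entries(size_t nmemb, size_t entry_size)
{
	return kvmalloc_array(nmemb, entry_size, GFP_KERNEL);
}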
@@ -2719,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(gt);
+ eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
- intel_gt_pm_get(to_gt(gt->i915));
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
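eb_select_engine() now saves the returned cookies in the fields added above. The comment explains the second reference: on a multi-GT part such as Meteor Lake, where a media GT has a nonzero gt->info.id, the root GT is pinned as well so that i915_vma_parked() cannot free VMAs while the ioctl is still validating them. Condensed from the hunk above (not the complete function), the acquire side is:

eb->wakeref = intel_gt_pm_get(ce->engine->gt);
if (gt->info.id)	/* non-root GT, e.g. an MTL media GT */
	eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));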
@@ -2765,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915));
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2785,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915));
- intel_gt_pm_put(eb->gt);
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
+ intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);
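Both release paths, the error unwind in eb_select_engine() and eb_put_engine() here, hand the saved cookies back under the same gt->info.id condition, unwinding in the reverse order of acquisition. Debug builds that track wakerefs can then attribute a leaked reference to the get that took it. Condensed from the hunk above, the put side is:

if (eb->gt->info.id)
	intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);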