author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      76267dbc9b9a130337be3640948fe397b04ac629 /drivers/gpu/drm/i915/gem
parent    Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c               |   3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_create.c                |   4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c                |   2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c            |  24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c                  |   6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c                |   1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h                |  89
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h    | 104
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c                  |   1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c              |  73
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c  |  39
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c         |   2

12 files changed, 181 insertions(+), 167 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 385ffc575b..7d97ea2a65 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -6,11 +6,10 @@
 
 #include <drm/drm_cache.h>
 
-#include "display/intel_frontbuffer.h"
-
 #include "i915_config.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
+#include "i915_gem_object_frontbuffer.h"
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index d24c0ce880..19156ba4b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -405,8 +405,8 @@ static int ext_set_pat(struct i915_user_extension __user *base, void *data)
 	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
 		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));
 
-	/* Limiting the extension only to Meteor Lake */
-	if (!IS_METEORLAKE(i915))
+	/* Limiting the extension only to Xe_LPG and beyond */
+	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
 		return -ENODEV;
 
 	if (copy_from_user(&ext, base, sizeof(ext)))
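Note on the i915_gem_create.c hunk: the set_pat extension is now gated on the full graphics IP version rather than a single platform macro, so later Xe_LPG-derived parts qualify without further patching. A minimal sketch of how the comparison works; IP_VER() is the packing macro i915 uses, while the sample values below are only illustrative:

    /* i915 packs "version.release" into one integer that compares naturally: */
    #define IP_VER(ver, rel)	((ver) << 8 | (rel))

    /*
     * Xe_LPG (e.g. Meteor Lake) reports graphics IP 12.70, i.e.
     * IP_VER(12, 70) == 0x0c46. Any platform whose GRAPHICS_VER_FULL()
     * is below that value still takes the -ENODEV path above.
     */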
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index ffddec1d2a..3770828f2e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -5,7 +5,6 @@
  */
 
 #include "display/intel_display.h"
-#include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
 
 #include "i915_drv.h"
@@ -16,6 +15,7 @@
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_frontbuffer.h"
 #include "i915_vma.h"
 
 #define VTD_GUARD	(168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5a687a3686..683fd8d315 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -321,7 +321,7 @@ static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
 static void eb_unpin_engine(struct i915_execbuffer *eb);
 static void eb_capture_release(struct i915_execbuffer *eb);
 
-static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
+static bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->context->engine) ||
 		(intel_engine_using_cmd_parser(eb->context->engine) &&
@@ -433,7 +433,7 @@ static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
 	return pin_flags;
 }
 
-static inline int
+static int
 eb_pin_vma(struct i915_execbuffer *eb,
 	   const struct drm_i915_gem_exec_object2 *entry,
 	   struct eb_vma *ev)
@@ -486,7 +486,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
 	return 0;
 }
 
-static inline void
+static void
 eb_unreserve_vma(struct eb_vma *ev)
 {
 	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
@@ -548,7 +548,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	return 0;
 }
 
-static inline bool
+static bool
 is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
 {
 	return eb->args->flags & I915_EXEC_BATCH_FIRST ?
@@ -628,8 +628,8 @@ eb_add_vma(struct i915_execbuffer *eb,
 	return 0;
 }
 
-static inline int use_cpu_reloc(const struct reloc_cache *cache,
-				const struct drm_i915_gem_object *obj)
+static int use_cpu_reloc(const struct reloc_cache *cache,
+			 const struct drm_i915_gem_object *obj)
 {
 	if (!i915_gem_object_has_struct_page(obj))
 		return false;
@@ -1107,7 +1107,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
 		kfree(eb->buckets);
 }
 
-static inline u64
+static u64
 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
 		  const struct i915_vma *target)
 {
@@ -1128,19 +1128,19 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->node.flags = 0;
 }
 
-static inline void *unmask_page(unsigned long p)
+static void *unmask_page(unsigned long p)
 {
 	return (void *)(uintptr_t)(p & PAGE_MASK);
 }
 
-static inline unsigned int unmask_flags(unsigned long p)
+static unsigned int unmask_flags(unsigned long p)
 {
 	return p & ~PAGE_MASK;
 }
 
 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
 
-static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
+static struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
 {
 	struct drm_i915_private *i915 =
 		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
@@ -1436,7 +1436,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 		drm_dbg(&i915->drm, "reloc with multiple write domains: "
 			"target %d offset %d "
-			"read %08x write %08x",
+			"read %08x write %08x\n",
 			reloc->target_handle,
 			(int) reloc->offset,
 			reloc->read_domains,
@@ -1447,7 +1447,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		     & ~I915_GEM_GPU_DOMAINS)) {
 		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			"target %d offset %d "
-			"read %08x write %08x",
+			"read %08x write %08x\n",
 			reloc->target_handle,
 			(int) reloc->offset,
 			reloc->read_domains,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 310654542b..a2195e28b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -916,11 +916,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
 {
 	struct file *file;
 
-	rcu_read_lock();
-	file = READ_ONCE(i915->gem.mmap_singleton);
-	if (file && !get_file_rcu(file))
-		file = NULL;
-	rcu_read_unlock();
+	file = get_file_active(&i915->gem.mmap_singleton);
 	if (file)
 		return file;
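The mmap_singleton() change swaps an open-coded RCU file-refcount sequence for a single VFS helper. For reference, a sketch equivalent to the removed lines; this mirrors the old 6.6 code above and is not the canonical implementation of get_file_active():

    #include <linux/fs.h>
    #include <linux/rcupdate.h>

    /* Take a reference on *slot unless the file is already going away. */
    static struct file *get_active_file_sketch(struct file **slot)
    {
        struct file *file;

        rcu_read_lock();
        file = READ_ONCE(*slot);        /* may be cleared concurrently */
        if (file && !get_file_rcu(file))
            file = NULL;                /* refcount already hit zero */
        rcu_read_unlock();

        return file;
    }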
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ef9346ed6d..c26d875558 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -37,6 +37,7 @@
 #include "i915_gem_dmabuf.h"
 #include "i915_gem_mman.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_frontbuffer.h"
 #include "i915_gem_ttm.h"
 #include "i915_memcpy.h"
 #include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f607b87890..3560a062d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,7 +11,6 @@
 #include <drm/drm_file.h>
 #include <drm/drm_device.h>
 
-#include "display/intel_frontbuffer.h"
 #include "intel_memory_region.h"
 #include "i915_gem_object_types.h"
 #include "i915_gem_gtt.h"
@@ -806,27 +805,6 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
 
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
-					 enum fb_op_origin origin);
-void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
-					      enum fb_op_origin origin);
-
-static inline void
-i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
-				  enum fb_op_origin origin)
-{
-	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
-		__i915_gem_object_flush_frontbuffer(obj, origin);
-}
-
-static inline void
-i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
-				       enum fb_op_origin origin)
-{
-	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
-		__i915_gem_object_invalidate_frontbuffer(obj, origin);
-}
-
 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
 
 bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
@@ -887,71 +865,4 @@ static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *o
 #endif
 
-/**
- * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
- * @obj: The object whose frontbuffer to get.
- *
- * Get pointer to object's frontbuffer if such exists. Please note that RCU
- * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer.
- *
- * Return: pointer to object's frontbuffer is such exists or NULL
- */
-static inline struct intel_frontbuffer *
-i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
-{
-	struct intel_frontbuffer *front;
-
-	if (likely(!rcu_access_pointer(obj->frontbuffer)))
-		return NULL;
-
-	rcu_read_lock();
-	do {
-		front = rcu_dereference(obj->frontbuffer);
-		if (!front)
-			break;
-
-		if (unlikely(!kref_get_unless_zero(&front->ref)))
-			continue;
-
-		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
-			break;
-
-		intel_frontbuffer_put(front);
-	} while (1);
-	rcu_read_unlock();
-
-	return front;
-}
-
-/**
- * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
- * @obj: The object whose frontbuffer to set.
- * @front: The frontbuffer to set
- *
- * Set object's frontbuffer pointer. If frontbuffer is already set for the
- * object keep it and return it's pointer to the caller. Please note that RCU
- * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display.fb_tracking.lock
- *
- * Return: pointer to frontbuffer which was set.
- */
-static inline struct intel_frontbuffer *
-i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
-				struct intel_frontbuffer *front)
-{
-	struct intel_frontbuffer *cur = front;
-
-	if (!front) {
-		RCU_INIT_POINTER(obj->frontbuffer, NULL);
-	} else if (rcu_access_pointer(obj->frontbuffer)) {
-		cur = rcu_dereference_protected(obj->frontbuffer, true);
-		kref_get(&cur->ref);
-	} else {
-		drm_gem_object_get(intel_bo_to_drm_bo(obj));
-		rcu_assign_pointer(obj->frontbuffer, front);
-	}
-
-	return cur;
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
new file mode 100644
index 0000000000..9fbf14867a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_FRONTBUFFER_H__
+#define __I915_GEM_OBJECT_FRONTBUFFER_H__
+
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+
+#include "display/intel_frontbuffer.h"
+#include "i915_gem_object_types.h"
+
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+					 enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+					      enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+				  enum fb_op_origin origin)
+{
+	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+		__i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+				       enum fb_op_origin origin)
+{
+	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+		__i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
+/**
+ * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
+ * @obj: The object whose frontbuffer to get.
+ *
+ * Get pointer to object's frontbuffer if such exists. Please note that RCU
+ * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer.
+ *
+ * Return: pointer to object's frontbuffer is such exists or NULL
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
+{
+	struct intel_frontbuffer *front;
+
+	if (likely(!rcu_access_pointer(obj->frontbuffer)))
+		return NULL;
+
+	rcu_read_lock();
+	do {
+		front = rcu_dereference(obj->frontbuffer);
+		if (!front)
+			break;
+
+		if (unlikely(!kref_get_unless_zero(&front->ref)))
+			continue;
+
+		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+			break;
+
+		intel_frontbuffer_put(front);
+	} while (1);
+	rcu_read_unlock();
+
+	return front;
+}
+
+/**
+ * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
+ * @obj: The object whose frontbuffer to set.
+ * @front: The frontbuffer to set
+ *
+ * Set object's frontbuffer pointer. If frontbuffer is already set for the
+ * object keep it and return it's pointer to the caller. Please note that RCU
+ * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
+ * function is protected by i915->display.fb_tracking.lock
+ *
+ * Return: pointer to frontbuffer which was set.
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
+				struct intel_frontbuffer *front)
+{
+	struct intel_frontbuffer *cur = front;
+
+	if (!front) {
+		RCU_INIT_POINTER(obj->frontbuffer, NULL);
+		drm_gem_object_put(intel_bo_to_drm_bo(obj));
+	} else if (rcu_access_pointer(obj->frontbuffer)) {
+		cur = rcu_dereference_protected(obj->frontbuffer, true);
+		kref_get(&cur->ref);
+	} else {
+		drm_gem_object_get(intel_bo_to_drm_bo(obj));
+		rcu_assign_pointer(obj->frontbuffer, front);
+	}
+
+	return cur;
+}
+
+#endif
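Two things are worth noting in the frontbuffer move. First, unlike the copy removed from i915_gem_object.h, the relocated i915_gem_object_set_frontbuffer() now also drops the GEM object reference when the frontbuffer pointer is cleared, balancing the drm_gem_object_get() taken when it was set. Second, the lookup helper is an instance of the classic RCU-plus-kref pattern: take a reference only if the object is still alive, then confirm the pointer did not change underneath. A generic, self-contained sketch of that pattern, using a hypothetical `struct thing` rather than i915 types:

    #include <linux/kref.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct thing {
        struct kref ref;
        struct rcu_head rcu;
    };

    static void thing_release(struct kref *ref)
    {
        struct thing *t = container_of(ref, struct thing, ref);

        kfree_rcu(t, rcu);      /* defer the free past a grace period */
    }

    static struct thing *thing_get_live(struct thing __rcu **slot)
    {
        struct thing *t;

        rcu_read_lock();
        do {
            t = rcu_dereference(*slot);
            if (!t)
                break;                  /* nothing published */

            if (!kref_get_unless_zero(&t->ref))
                continue;               /* object dying; re-read the slot */

            if (t == rcu_access_pointer(*slot))
                break;                  /* ref taken and slot still current */

            kref_put(&t->ref, thing_release);   /* slot changed; retry */
        } while (1);
        rcu_read_unlock();

        return t;
    }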
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 76efe98eaa..5df128e2f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -13,6 +13,7 @@
 #include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_frontbuffer.h"
 #include "i915_gem_region.h"
 #include "i915_gem_tiling.h"
 #include "i915_scatterlist.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 214763942a..d166052eb2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(gt);
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -284,8 +288,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long num_objects;
 	unsigned long count;
@@ -302,8 +305,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	if (num_objects) {
 		unsigned long avg = 2 * count / num_objects;
 
-		i915->mm.shrinker.batch =
-			max((i915->mm.shrinker.batch + avg) >> 1,
+		i915->mm.shrinker->batch =
+			max((i915->mm.shrinker->batch + avg) >> 1,
 			    128ul /* default SHRINK_BATCH */);
 	}
@@ -313,8 +316,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 static unsigned long
 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long freed;
 
 	sc->nr_scanned = 0;
@@ -389,6 +391,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +401,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
 
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
 
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
 
-		i915_gem_object_unlock(obj);
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
 	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
@@ -422,12 +428,17 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 {
-	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
-	i915->mm.shrinker.batch = 4096;
-	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
-						  "drm-i915_gem"));
+	i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
+	if (!i915->mm.shrinker) {
+		drm_WARN_ON(&i915->drm, 1);
+	} else {
+		i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
+		i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
+		i915->mm.shrinker->batch = 4096;
+		i915->mm.shrinker->private_data = i915;
+
+		shrinker_register(i915->mm.shrinker);
+	}
 
 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
@@ -443,7 +454,7 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
 	drm_WARN_ON(&i915->drm,
 		    unregister_oom_notifier(&i915->mm.oom_notifier));
-	unregister_shrinker(&i915->mm.shrinker);
+	shrinker_free(i915->mm.shrinker);
 }
 
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
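The shrinker conversion follows the dynamically-allocated shrinker API from this kernel range: the shrinker becomes a separately allocated object carrying a private_data back-pointer, instead of being embedded in the driver struct and recovered via container_of(). A hedged sketch of the registration shape, with a hypothetical driver struct and callbacks that deliberately reclaim nothing:

    #include <linux/errno.h>
    #include <linux/shrinker.h>

    struct mydrv {
        struct shrinker *shrinker;      /* hypothetical driver state */
    };

    static unsigned long mydrv_count(struct shrinker *s, struct shrink_control *sc)
    {
        struct mydrv *drv = s->private_data;    /* back-pointer set below */

        (void)drv;
        return 0;       /* "nothing to scan"; real drivers report object count */
    }

    static unsigned long mydrv_scan(struct shrinker *s, struct shrink_control *sc)
    {
        return SHRINK_STOP;     /* this sketch never frees anything */
    }

    static int mydrv_register_shrinker(struct mydrv *drv)
    {
        drv->shrinker = shrinker_alloc(0, "mydrv");     /* name shows in debugfs */
        if (!drv->shrinker)
            return -ENOMEM;

        drv->shrinker->count_objects = mydrv_count;
        drv->shrinker->scan_objects = mydrv_scan;
        drv->shrinker->batch = 4096;
        drv->shrinker->private_data = drv;

        shrinker_register(drv->shrinker);       /* visible to reclaim from here on */
        return 0;
    }

    static void mydrv_unregister_shrinker(struct mydrv *drv)
    {
        shrinker_free(drv->shrinker);   /* unregisters and frees in one call */
    }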
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ff81af4c82..10a7847f1b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -83,8 +83,7 @@ static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
 enum client_tiling {
 	CLIENT_TILING_LINEAR,
 	CLIENT_TILING_X,
-	CLIENT_TILING_Y,
-	CLIENT_TILING_4,
+	CLIENT_TILING_Y,  /* Y-major, either Tile4 (Xe_HP and beyond) or legacy TileY */
 	CLIENT_NUM_TILING_TYPES
 };
@@ -165,11 +164,10 @@ static int prepare_blit(const struct tiled_blits *t,
 		     BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));
 
 	src_pitch = t->width; /* in dwords */
-	if (src->tiling == CLIENT_TILING_4) {
-		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
-		src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
-	} else if (src->tiling == CLIENT_TILING_Y) {
+	if (src->tiling == CLIENT_TILING_Y) {
 		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+			src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
 	} else if (src->tiling == CLIENT_TILING_X) {
 		src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
 	} else {
@@ -177,11 +175,10 @@ static int prepare_blit(const struct tiled_blits *t,
 	}
 
 	dst_pitch = t->width; /* in dwords */
-	if (dst->tiling == CLIENT_TILING_4) {
-		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
-		dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
-	} else if (dst->tiling == CLIENT_TILING_Y) {
+	if (dst->tiling == CLIENT_TILING_Y) {
 		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
+		if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
+			dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
 	} else if (dst->tiling == CLIENT_TILING_X) {
 		dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
 	} else {
@@ -326,12 +323,6 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
 		t->buffers[i].vma = vma;
 		t->buffers[i].tiling =
 			i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);
-
-		/* Platforms support either TileY or Tile4, not both */
-		if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y)
-			t->buffers[i].tiling = CLIENT_TILING_4;
-		else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4)
-			t->buffers[i].tiling = CLIENT_TILING_Y;
 	}
 
 	return 0;
@@ -367,18 +358,19 @@ static u64 tiled_offset(const struct intel_gt *gt,
 
 	y = div64_u64_rem(v, stride, &x);
 
-	if (tiling == CLIENT_TILING_4) {
-		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
-
-		/* no swizzling for f-tiling */
-		swizzle = I915_BIT_6_SWIZZLE_NONE;
-	} else if (tiling == CLIENT_TILING_X) {
+	if (tiling == CLIENT_TILING_X) {
 		v = div64_u64_rem(y, 8, &y) * stride * 8;
 		v += y * 512;
 		v += div64_u64_rem(x, 512, &x) << 12;
 		v += x;
 		swizzle = gt->ggtt->bit_6_swizzle_x;
+	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
+		/* Y-major tiling layout is Tile4 for Xe_HP and beyond */
+		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
+
+		/* no swizzling for f-tiling */
+		swizzle = I915_BIT_6_SWIZZLE_NONE;
 	} else {
 		const unsigned int ytile_span = 16;
 		const unsigned int ytile_height = 512;
@@ -414,8 +406,7 @@ static const char *repr_tiling(enum client_tiling tiling)
 	switch (tiling) {
 	case CLIENT_TILING_LINEAR: return "linear";
 	case CLIENT_TILING_X: return "X";
-	case CLIENT_TILING_Y: return "Y";
-	case CLIENT_TILING_4: return "F";
+	case CLIENT_TILING_Y: return "Y / 4";
 	default: return "unknown";
 	}
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 8ac6726ec1..e199d7dbb8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -36,7 +36,7 @@ mock_context(struct drm_i915_private *i915,
 	if (name) {
 		struct i915_ppgtt *ppgtt;
 
-		strncpy(ctx->name, name, sizeof(ctx->name) - 1);
+		strscpy(ctx->name, name, sizeof(ctx->name));
 
 		ppgtt = mock_ppgtt(i915, name);
 		if (!ppgtt)
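On the mock_context.c hunk: strscpy() bounds the copy, always NUL-terminates, and reports truncation, none of which the strncpy(dst, src, size - 1) idiom guarantees on its own. A small illustrative use; the helper and buffer size here are hypothetical, not from the tree:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/string.h>

    static void copy_ctx_name(char *dst, size_t len, const char *src)
    {
        ssize_t n = strscpy(dst, src, len);     /* dst is always terminated */

        if (n == -E2BIG)
            pr_debug("name truncated\n");       /* strncpy() would have given no
                                                 * indication of truncation */
    }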