Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_shrinker.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 73 ++++++++++++-----------
1 file changed, 42 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 214763942..d166052eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 
 #include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "i915_trace.h"
 
@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	int err = 0;
+	int err = 0, i = 0;
+	struct intel_gt *gt;
 
 	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
 	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 * what we can do is give them a kick so that we do not keep idle
 	 * contexts around longer than is necessary.
 	 */
-	if (shrink & I915_SHRINK_ACTIVE)
-		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(to_gt(i915));
+	if (shrink & I915_SHRINK_ACTIVE) {
+		for_each_gt(gt, i915, i)
+			/* Retire requests to unpin all idle contexts */
+			intel_gt_retire_requests(gt);
+	}
 
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
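Note: for_each_gt() is declared in gt/intel_gt.h, which is what the include added above is for. Unlike to_gt(i915), which yields only the root GT, it visits every GT on the device, so the retire kick now reaches all tiles of a multi-GT part. A minimal, purely illustrative sketch of the iterator shape (the loop body is hypothetical and not part of this patch):

	struct intel_gt *gt;
	int i;

	/* 'gt' walks each GT instance; 'i' receives its index. */
	for_each_gt(gt, i915, i)
		drm_dbg(&i915->drm, "kicking requests on GT%d\n", i);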
@@ -284,8 +288,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long num_objects;
 	unsigned long count;
 
@@ -302,8 +305,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	if (num_objects) {
 		unsigned long avg = 2 * count / num_objects;
 
-		i915->mm.shrinker.batch =
-			max((i915->mm.shrinker.batch + avg) >> 1,
+		i915->mm.shrinker->batch =
+			max((i915->mm.shrinker->batch + avg) >> 1,
 			    128ul /* default SHRINK_BATCH */);
 	}
 
@@ -313,8 +316,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 static unsigned long
 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *i915 =
-		container_of(shrinker, struct drm_i915_private, mm.shrinker);
+	struct drm_i915_private *i915 = shrinker->private_data;
 	unsigned long freed;
 
 	sc->nr_scanned = 0;
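Note: both shrinker callbacks switch from container_of() to shrinker->private_data. The shrinker is no longer embedded in i915->mm but heap-allocated (see the registration change below), so recovering the owner by offset is no longer possible; the owner is instead stashed in private_data before registration. A minimal sketch of a count/scan pair under the dynamic shrinker API, using a hypothetical my_dev type in place of drm_i915_private:

	#include <linux/shrinker.h>

	struct my_dev {
		struct shrinker *shrinker;
		unsigned long nr_cached;	/* objects we could free */
	};

	static unsigned long my_count(struct shrinker *shrinker,
				      struct shrink_control *sc)
	{
		/* Owner comes from private_data, not container_of(). */
		struct my_dev *dev = shrinker->private_data;

		return READ_ONCE(dev->nr_cached);
	}

	static unsigned long my_scan(struct shrinker *shrinker,
				     struct shrink_control *sc)
	{
		struct my_dev *dev = shrinker->private_data;
		unsigned long freed = min(READ_ONCE(dev->nr_cached),
					  sc->nr_to_scan);

		/* Drop up to sc->nr_to_scan objects, report what went away. */
		WRITE_ONCE(dev->nr_cached, dev->nr_cached - freed);
		return freed ? freed : SHRINK_STOP;
	}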
@@ -389,6 +391,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
 	intel_wakeref_t wakeref;
+	struct intel_gt *gt;
+	int i;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +401,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 					       I915_SHRINK_VMAPS);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
-	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
-	list_for_each_entry_safe(vma, next,
-				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (!vma->iomap || i915_vma_is_active(vma))
-			continue;
+	for_each_gt(gt, i915, i) {
+		mutex_lock(&gt->ggtt->vm.mutex);
+		list_for_each_entry_safe(vma, next,
+					 &gt->ggtt->vm.bound_list, vm_link) {
+			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			if (!vma->iomap || i915_vma_is_active(vma))
+				continue;
 
-		if (!i915_gem_object_trylock(obj, NULL))
-			continue;
+			if (!i915_gem_object_trylock(obj, NULL))
+				continue;
 
-		if (__i915_vma_unbind(vma) == 0)
-			freed_pages += count;
+			if (__i915_vma_unbind(vma) == 0)
+				freed_pages += count;
 
-		i915_gem_object_unlock(obj);
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&gt->ggtt->vm.mutex);
 	}
-	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
@@ -422,12 +428,17 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
 void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 {
-	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
-	i915->mm.shrinker.batch = 4096;
-	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
-						  "drm-i915_gem"));
+	i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
+	if (!i915->mm.shrinker) {
+		drm_WARN_ON(&i915->drm, 1);
+	} else {
+		i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
+		i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
+		i915->mm.shrinker->batch = 4096;
+		i915->mm.shrinker->private_data = i915;
+
+		shrinker_register(i915->mm.shrinker);
+	}
 
 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
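Note: this hunk converts to the dynamic shrinker API (shrinker_alloc/shrinker_register/shrinker_free, merged around v6.7). shrinker_alloc() returns a heap-allocated shrinker with seeks already defaulted to DEFAULT_SEEKS, which is why that explicit assignment disappears; shrinker_register() publishes it to reclaim only after its fields are filled in; and shrinker_free() in the unregister hunk below replaces unregister_shrinker(). A condensed sketch of the whole lifecycle, continuing the hypothetical my_dev example from above:

	static int my_dev_register_shrinker(struct my_dev *dev)
	{
		/* The name shows up in shrinker debugfs when enabled. */
		dev->shrinker = shrinker_alloc(0, "my-dev");
		if (!dev->shrinker)
			return -ENOMEM;

		dev->shrinker->count_objects = my_count;
		dev->shrinker->scan_objects = my_scan;
		dev->shrinker->batch = 4096;	/* tune per subsystem */
		dev->shrinker->private_data = dev;

		/* Publish only once every field above is initialized. */
		shrinker_register(dev->shrinker);
		return 0;
	}

	static void my_dev_unregister_shrinker(struct my_dev *dev)
	{
		/* Unregisters and drops the reference; safe on NULL. */
		shrinker_free(dev->shrinker);
	}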
@@ -443,7 +454,7 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
 	drm_WARN_ON(&i915->drm,
 		    unregister_oom_notifier(&i915->mm.oom_notifier));
-	unregister_shrinker(&i915->mm.shrinker);
+	shrinker_free(i915->mm.shrinker);
 }
 
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,