// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"
/* Eviction/swap of GEM buffers is enabled by default; this module param
 * (writable at runtime, 0600) allows disabling it, e.g. while testing the
 * different iommu combinations that can be paired with the driver.
 * NOTE(review): the comment previously said "Default disabled for now",
 * which contradicted the actual default of true.
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
static bool can_swap(void)
{
return enable_eviction && get_nr_swap_pages() > 0;
}
/* Decide whether this reclaim context may block (i.e. wait for fences)
 * while shrinking: only direct-reclaim contexts qualify, and then only
 * kswapd or callers whose gfp mask permits reclaim.
 */
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	/* NOTE(review): __GFP_DIRECT_RECLAIM is part of __GFP_RECLAIM, so
	 * after the check above this disjunct looks always-true; possibly a
	 * narrower flag (e.g. __GFP_FS) was intended — confirm upstream.
	 */
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
/* Report how many objects are potentially reclaimable: always the
 * dontneed (madvised-purgeable) LRU, plus the willneed LRU when those
 * objects could be evicted to swap.
 */
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	unsigned nr_dontneed = priv->lru.dontneed.count;
	unsigned nr_willneed = can_swap() ? priv->lru.willneed.count : 0;

	return nr_dontneed + nr_willneed;
}
/* Try to purge a single object: it must be madvised purgeable and idle.
 * Returns true if the object's backing pages were released.
 */
static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)) || msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}
/* Try to evict a single object to swap: it must be evictable and idle.
 * Returns true if the object was evicted.
 */
static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)) || msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}
/* Block (bounded) until outstanding read/write access to the object's
 * reservation has settled.  Returns true if the object became idle.
 */
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	long remain = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(true),
					    false, 1000);

	return remain > 0;
}
/* More expensive variant of purge(): first wait for the object to become
 * idle, then attempt the purge.
 */
static bool
active_purge(struct drm_gem_object *obj)
{
	return wait_for_idle(obj) && purge(obj);
}
/* More expensive variant of evict(): first wait for the object to become
 * idle, then attempt the eviction.
 */
static bool
active_evict(struct drm_gem_object *obj)
{
	return wait_for_idle(obj) && evict(obj);
}
/* Shrinker scan callback: attempt to free up to sc->nr_to_scan objects,
 * walking through progressively more aggressive/expensive reclaim stages
 * until the budget is spent.  Returns the number freed, or SHRINK_STOP
 * when nothing was freed or nothing reclaimable remains.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim:
		 * (partial initializers leave .freed/.remaining zeroed, so
		 * skipped stages contribute nothing to the trace/totals)
		 */
		{ &priv->lru.dontneed, purge, true },
		{ &priv->lru.willneed, evict, can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	/* Stop as soon as the scan budget (nr) is exhausted: */
	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		/* Skip stages whose precondition (swap/blocking) doesn't hold: */
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					&stages[i].remaining,
					stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				stages[1].freed, stages[2].freed,
				stages[3].freed);
	}

	/* Report progress only while there is still something left to scan;
	 * otherwise tell the core to stop calling us:
	 */
	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
#ifdef CONFIG_DEBUG_FS
/* Debugfs hook: force a shrinker scan of up to @nr_to_scan objects.
 * fs_reclaim_acquire/release annotate this as reclaim context for lockdep.
 * Returns the number freed, or SHRINK_STOP if the shrinker isn't
 * registered or nothing could be freed.
 */
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = { .nr_to_scan = nr_to_scan };
	unsigned long freed = SHRINK_STOP;

	fs_reclaim_acquire(GFP_KERNEL);
	if (priv->shrinker)
		freed = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return freed;
}
#endif
/* Since we don't know any better, bail after unmapping a few objects and,
 * if necessary, let the shrinker be invoked again.  Seems better than
 * unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
/* Per-object callback for the vmap purge walk: drop the object's kernel
 * vmap if it can be unmapped.  Returns true if a mapping was released.
 */
static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}
/* vmap purge notifier: walk all three LRUs (dontneed, willneed, pinned)
 * releasing kernel vmaps, stopping once vmap_shrink_limit objects have
 * been unmapped.  The running total is accumulated into *ptr for the
 * notifier chain.
 */
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned long remaining = 0;
	unsigned unmapped = 0;
	unsigned idx = 0;

	while (lrus[idx] && unmapped < vmap_shrink_limit) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     &remaining,
					     vmap_shrink);
		idx++;
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}
/**
* msm_gem_shrinker_init - Initialize msm shrinker
* @dev: drm device
*
* This function registers and sets up the msm shrinker.
*/
int msm_gem_shrinker_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
if (!priv->shrinker)
return -ENOMEM;
priv->shrinker->count_objects = msm_gem_shrinker_count;
priv->shrinker->scan_objects = msm_gem_shrinker_scan;
priv->shrinker->private_data = priv;
shrinker_register(priv->shrinker);
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
return 0;
}
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker and the vmap purge notifier.
 * Safe to call when init failed (priv->shrinker is NULL).
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->shrinker)
		return;

	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
	shrinker_free(priv->shrinker);
}
|