#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_mmap.h"

void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
	/* All other hooks are optional; this one is not. */
	assert(extent_hooks->alloc != NULL);
	ehooks->ind = ind;
	ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}
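
/*
 * A minimal sketch (hypothetical, not part of this file) of a custom hook
 * table: only the mandatory alloc hook is non-NULL, and it simply delegates
 * to the default implementation defined below.  The my_alloc / my_hooks
 * names are assumptions for illustration only.
 *
 *	static void *
 *	my_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
 *	    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
 *		return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
 *		    ALIGNMENT_CEILING(alignment, PAGE), zero, commit,
 *		    arena_ind);
 *	}
 *	static extent_hooks_t my_hooks = { .alloc = my_alloc };
 */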

/*
 * Even if the caller specifies (!*zero), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.
 * arena_extent_alloc() takes advantage of this to avoid demanding zeroed
 * extents, while still benefiting from them when they happen to be returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}
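
/*
 * Sketch of the *zero contract documented above (hypothetical caller, not
 * part of this file): request non-zeroed memory, but honor the flag if the
 * backend toggles it.
 *
 *	bool zero = false, commit = true;
 *	void *p = extent_alloc_core(tsdn, arena, NULL, size, PAGE, &zero,
 *	    &commit, dss_prec_disabled);
 *	if (p != NULL && zero) {
 *		// Pages came back demand-zeroed; no memset required.
 *	}
 */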

void *
ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	arena_t *arena = arena_get(tsdn, arena_ind, false);
	/* NULL arena indicates arena_create. */
	assert(arena != NULL || alignment == HUGEPAGE);
	dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
	    (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, dss);
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

static void *
ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
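	/*
	 * Extents are page-aligned, so round the requested alignment up to a
	 * multiple of the page size before delegating.
	 */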
	return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
}

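/*
 * Note on return conventions: like the other boolean extent hooks in this
 * file, dalloc returns false on success and true if the operation was not
 * performed, in which case jemalloc must retain the extent.
 */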
bool
ehooks_default_dalloc_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return ehooks_default_dalloc_impl(addr, size);
}

void
ehooks_default_destroy_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	ehooks_default_destroy_impl(addr, size);
}

bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_commit_impl(addr, offset, length);
}

bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_decommit_impl(addr, offset, length);
}

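/*
 * Lazy purging (e.g. MADV_FREE) lets the OS reclaim the pages at its
 * leisure, leaving their contents readable in the meantime; forced purging
 * (e.g. MADV_DONTNEED on Linux) guarantees subsequent reads see zeroes.
 * Each default is compiled in only on platforms that advertise the
 * corresponding PAGES_CAN_PURGE_* capability.
 */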
#ifdef PAGES_CAN_PURGE_LAZY
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_lazy_impl(addr, offset, length);
}
#endif

#ifdef PAGES_CAN_PURGE_FORCED
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}

static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_forced_impl(addr, offset, length);
}
#endif

bool
ehooks_default_split_impl(void) {
	if (!maps_coalesce) {
		/*
		 * Without retain, only whole regions can be purged (required by
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
		 * comments in extent_head_no_merge().
		 */
		return !opt_retain;
	}

	return false;
}

static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return ehooks_default_split_impl();
}

bool
ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
	assert(addr_a < addr_b);
	/*
	 * For non-DSS cases --
	 * a) W/o maps_coalesce, merge is not always allowed (Windows):
	 *   1) w/o retain, never merge (first branch below).
	 *   2) with retain, only merge extents from the same VirtualAlloc
	 *      region (in which case MEM_DECOMMIT is utilized for purging).
	 *
	 * b) With maps_coalesce, it's always possible to merge.
	 *   1) w/o retain, always allow merge (the only concern is the
	 *      dirty / muzzy state of the pages).
	 *   2) with retain, to preserve the SN / first-fit, merge is still
	 *      disallowed if b is a head extent, i.e. no merging across
	 *      different mmap regions.
	 *
	 * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
	 * sanity checked in the second branch below.
	 */
	if (!maps_coalesce && !opt_retain) {
		return true;
	}
	if (config_debug) {
		edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
		    addr_a);
		bool head_a = edata_is_head_get(a);
		edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
		    addr_b);
		bool head_b = edata_is_head_get(b);
		emap_assert_mapped(tsdn, &arena_emap_global, a);
		emap_assert_mapped(tsdn, &arena_emap_global, b);
		assert(extent_neighbor_head_state_mergeable(head_a, head_b,
		    /* forward */ true));
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

bool
ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	tsdn_t *tsdn = tsdn_fetch();

	return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
}

void
ehooks_default_zero_impl(void *addr, size_t size) {
	/*
	 * By default, we try to zero out memory using OS-provided demand-zeroed
	 * pages.  If the user has specifically requested hugepages, though, we
	 * don't want to purge in the middle of a hugepage (which would break it
	 * up), so we act conservatively and use memset.
	 */
	bool needs_memset = true;
	if (opt_thp != thp_mode_always) {
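		/*
		 * pages_purge_forced() returns true when the pages could not
		 * be guaranteed zeroed, in which case we fall back to memset.
		 */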
		needs_memset = pages_purge_forced(addr, size);
	}
	if (needs_memset) {
		memset(addr, 0, size);
	}
}

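/*
 * Guard hooks: guard1 and guard2 are the sentinel pages bracketing an
 * extent when guarded allocation is in use; marking them makes stray
 * accesses fault, and unmarking returns them to normal use.
 */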
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
	pages_mark_guards(guard1, guard2);
}

void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
	pages_unmark_guards(guard1, guard2);
}

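/*
 * The default hook table, installed for arenas created without custom
 * extent_hooks.  A NULL purge entry means the platform cannot purge that
 * way, and jemalloc will not attempt it.
 */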
const extent_hooks_t ehooks_default_extent_hooks = {
	ehooks_default_alloc,
	ehooks_default_dalloc,
	ehooks_default_destroy,
	ehooks_default_commit,
	ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
	ehooks_default_purge_lazy,
#else
	NULL,
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	ehooks_default_purge_forced,
#else
	NULL,
#endif
	ehooks_default_split,
	ehooks_default_merge
};