author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-14 13:40:54 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-14 13:40:54 +0000
commit    317c0644ccf108aa23ef3fd8358bd66c2840bfc0 (patch)
tree      c417b3d25c86b775989cb5ac042f37611b626c8a /deps/jemalloc/test
parent    Initial commit. (diff)
Adding upstream version 5:7.2.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'deps/jemalloc/test')
-rw-r--r--deps/jemalloc/test/analyze/prof_bias.c60
-rw-r--r--deps/jemalloc/test/analyze/rand.c276
-rw-r--r--deps/jemalloc/test/analyze/sizes.c53
-rw-r--r--deps/jemalloc/test/include/test/SFMT-alti.h186
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params.h132
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params11213.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params1279.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params132049.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params19937.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params216091.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params2281.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params4253.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params44497.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params607.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params86243.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-sse2.h157
-rw-r--r--deps/jemalloc/test/include/test/SFMT.h146
-rw-r--r--deps/jemalloc/test/include/test/arena_util.h155
-rw-r--r--deps/jemalloc/test/include/test/bench.h60
-rw-r--r--deps/jemalloc/test/include/test/bgthd.h17
-rw-r--r--deps/jemalloc/test/include/test/btalloc.h30
-rw-r--r--deps/jemalloc/test/include/test/extent_hooks.h289
-rw-r--r--deps/jemalloc/test/include/test/jemalloc_test.h.in180
-rw-r--r--deps/jemalloc/test/include/test/jemalloc_test_defs.h.in9
-rw-r--r--deps/jemalloc/test/include/test/math.h306
-rw-r--r--deps/jemalloc/test/include/test/mq.h107
-rw-r--r--deps/jemalloc/test/include/test/mtx.h21
-rw-r--r--deps/jemalloc/test/include/test/nbits.h111
-rw-r--r--deps/jemalloc/test/include/test/san.h14
-rw-r--r--deps/jemalloc/test/include/test/sleep.h1
-rw-r--r--deps/jemalloc/test/include/test/test.h583
-rw-r--r--deps/jemalloc/test/include/test/thd.h9
-rw-r--r--deps/jemalloc/test/include/test/timer.h11
-rw-r--r--deps/jemalloc/test/integration/MALLOCX_ARENA.c66
-rw-r--r--deps/jemalloc/test/integration/aligned_alloc.c157
-rw-r--r--deps/jemalloc/test/integration/allocated.c124
-rw-r--r--deps/jemalloc/test/integration/cpp/basic.cpp24
-rw-r--r--deps/jemalloc/test/integration/cpp/infallible_new_false.cpp23
-rw-r--r--deps/jemalloc/test/integration/cpp/infallible_new_false.sh8
-rw-r--r--deps/jemalloc/test/integration/cpp/infallible_new_true.cpp67
-rw-r--r--deps/jemalloc/test/integration/cpp/infallible_new_true.sh8
-rw-r--r--deps/jemalloc/test/integration/extent.c287
-rw-r--r--deps/jemalloc/test/integration/extent.sh5
-rw-r--r--deps/jemalloc/test/integration/malloc.c16
-rw-r--r--deps/jemalloc/test/integration/mallocx.c274
-rw-r--r--deps/jemalloc/test/integration/mallocx.sh5
-rw-r--r--deps/jemalloc/test/integration/overflow.c59
-rw-r--r--deps/jemalloc/test/integration/posix_memalign.c128
-rw-r--r--deps/jemalloc/test/integration/rallocx.c308
-rw-r--r--deps/jemalloc/test/integration/sdallocx.c55
-rw-r--r--deps/jemalloc/test/integration/slab_sizes.c80
-rw-r--r--deps/jemalloc/test/integration/slab_sizes.sh4
-rw-r--r--deps/jemalloc/test/integration/smallocx.c312
-rw-r--r--deps/jemalloc/test/integration/smallocx.sh5
-rw-r--r--deps/jemalloc/test/integration/thread_arena.c86
-rw-r--r--deps/jemalloc/test/integration/thread_tcache_enabled.c87
-rw-r--r--deps/jemalloc/test/integration/xallocx.c384
-rw-r--r--deps/jemalloc/test/integration/xallocx.sh5
-rw-r--r--deps/jemalloc/test/src/SFMT.c719
-rw-r--r--deps/jemalloc/test/src/btalloc.c6
-rw-r--r--deps/jemalloc/test/src/btalloc_0.c3
-rw-r--r--deps/jemalloc/test/src/btalloc_1.c3
-rw-r--r--deps/jemalloc/test/src/math.c2
-rw-r--r--deps/jemalloc/test/src/mtx.c61
-rw-r--r--deps/jemalloc/test/src/sleep.c27
-rw-r--r--deps/jemalloc/test/src/test.c234
-rw-r--r--deps/jemalloc/test/src/thd.c34
-rw-r--r--deps/jemalloc/test/src/timer.c55
-rw-r--r--deps/jemalloc/test/stress/batch_alloc.c198
-rw-r--r--deps/jemalloc/test/stress/fill_flush.c76
-rw-r--r--deps/jemalloc/test/stress/hookbench.c73
-rw-r--r--deps/jemalloc/test/stress/large_microbench.c33
-rw-r--r--deps/jemalloc/test/stress/mallctl.c74
-rw-r--r--deps/jemalloc/test/stress/microbench.c126
-rw-r--r--deps/jemalloc/test/test.sh.in80
-rw-r--r--deps/jemalloc/test/unit/SFMT.c1599
-rw-r--r--deps/jemalloc/test/unit/a0.c16
-rw-r--r--deps/jemalloc/test/unit/arena_decay.c436
-rw-r--r--deps/jemalloc/test/unit/arena_decay.sh3
-rw-r--r--deps/jemalloc/test/unit/arena_reset.c361
-rw-r--r--deps/jemalloc/test/unit/arena_reset_prof.c4
-rw-r--r--deps/jemalloc/test/unit/arena_reset_prof.sh3
-rw-r--r--deps/jemalloc/test/unit/atomic.c229
-rw-r--r--deps/jemalloc/test/unit/background_thread.c118
-rw-r--r--deps/jemalloc/test/unit/background_thread_enable.c96
-rw-r--r--deps/jemalloc/test/unit/base.c265
-rw-r--r--deps/jemalloc/test/unit/batch_alloc.c189
-rw-r--r--deps/jemalloc/test/unit/batch_alloc.sh3
-rw-r--r--deps/jemalloc/test/unit/batch_alloc_prof.c1
-rw-r--r--deps/jemalloc/test/unit/batch_alloc_prof.sh3
-rw-r--r--deps/jemalloc/test/unit/binshard.c154
-rw-r--r--deps/jemalloc/test/unit/binshard.sh3
-rw-r--r--deps/jemalloc/test/unit/bit_util.c307
-rw-r--r--deps/jemalloc/test/unit/bitmap.c343
-rw-r--r--deps/jemalloc/test/unit/buf_writer.c196
-rw-r--r--deps/jemalloc/test/unit/cache_bin.c384
-rw-r--r--deps/jemalloc/test/unit/ckh.c211
-rw-r--r--deps/jemalloc/test/unit/counter.c80
-rw-r--r--deps/jemalloc/test/unit/decay.c283
-rw-r--r--deps/jemalloc/test/unit/div.c29
-rw-r--r--deps/jemalloc/test/unit/double_free.c77
-rw-r--r--deps/jemalloc/test/unit/double_free.h1
-rw-r--r--deps/jemalloc/test/unit/edata_cache.c226
-rw-r--r--deps/jemalloc/test/unit/emitter.c533
-rw-r--r--deps/jemalloc/test/unit/extent_quantize.c141
-rw-r--r--deps/jemalloc/test/unit/fb.c954
-rw-r--r--deps/jemalloc/test/unit/fork.c141
-rw-r--r--deps/jemalloc/test/unit/fxp.c394
-rw-r--r--deps/jemalloc/test/unit/hash.c173
-rw-r--r--deps/jemalloc/test/unit/hook.c586
-rw-r--r--deps/jemalloc/test/unit/hpa.c459
-rw-r--r--deps/jemalloc/test/unit/hpa_background_thread.c188
-rw-r--r--deps/jemalloc/test/unit/hpa_background_thread.sh4
-rw-r--r--deps/jemalloc/test/unit/hpdata.c244
-rw-r--r--deps/jemalloc/test/unit/huge.c108
-rw-r--r--deps/jemalloc/test/unit/inspect.c278
-rw-r--r--deps/jemalloc/test/unit/inspect.sh5
-rw-r--r--deps/jemalloc/test/unit/junk.c195
-rw-r--r--deps/jemalloc/test/unit/junk.sh5
-rw-r--r--deps/jemalloc/test/unit/junk_alloc.c1
-rw-r--r--deps/jemalloc/test/unit/junk_alloc.sh5
-rw-r--r--deps/jemalloc/test/unit/junk_free.c1
-rw-r--r--deps/jemalloc/test/unit/junk_free.sh5
-rw-r--r--deps/jemalloc/test/unit/log.c198
-rw-r--r--deps/jemalloc/test/unit/mallctl.c1274
-rw-r--r--deps/jemalloc/test/unit/malloc_conf_2.c29
-rw-r--r--deps/jemalloc/test/unit/malloc_conf_2.sh1
-rw-r--r--deps/jemalloc/test/unit/malloc_io.c268
-rw-r--r--deps/jemalloc/test/unit/math.c390
-rw-r--r--deps/jemalloc/test/unit/mpsc_queue.c304
-rw-r--r--deps/jemalloc/test/unit/mq.c89
-rw-r--r--deps/jemalloc/test/unit/mtx.c57
-rw-r--r--deps/jemalloc/test/unit/nstime.c252
-rw-r--r--deps/jemalloc/test/unit/oversize_threshold.c133
-rw-r--r--deps/jemalloc/test/unit/pa.c126
-rw-r--r--deps/jemalloc/test/unit/pack.c166
-rw-r--r--deps/jemalloc/test/unit/pack.sh4
-rw-r--r--deps/jemalloc/test/unit/pages.c29
-rw-r--r--deps/jemalloc/test/unit/peak.c47
-rw-r--r--deps/jemalloc/test/unit/ph.c330
-rw-r--r--deps/jemalloc/test/unit/prng.c189
-rw-r--r--deps/jemalloc/test/unit/prof_accum.c84
-rw-r--r--deps/jemalloc/test/unit/prof_accum.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_active.c119
-rw-r--r--deps/jemalloc/test/unit/prof_active.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_gdump.c77
-rw-r--r--deps/jemalloc/test/unit/prof_gdump.sh6
-rw-r--r--deps/jemalloc/test/unit/prof_hook.c169
-rw-r--r--deps/jemalloc/test/unit/prof_hook.sh6
-rw-r--r--deps/jemalloc/test/unit/prof_idump.c57
-rw-r--r--deps/jemalloc/test/unit/prof_idump.sh8
-rw-r--r--deps/jemalloc/test/unit/prof_log.c151
-rw-r--r--deps/jemalloc/test/unit/prof_log.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_mdump.c216
-rw-r--r--deps/jemalloc/test/unit/prof_mdump.sh6
-rw-r--r--deps/jemalloc/test/unit/prof_recent.c678
-rw-r--r--deps/jemalloc/test/unit/prof_recent.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_reset.c266
-rw-r--r--deps/jemalloc/test/unit/prof_reset.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_stats.c151
-rw-r--r--deps/jemalloc/test/unit/prof_stats.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_sys_thread_name.c77
-rw-r--r--deps/jemalloc/test/unit/prof_sys_thread_name.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_tctx.c48
-rw-r--r--deps/jemalloc/test/unit/prof_tctx.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_thread_name.c122
-rw-r--r--deps/jemalloc/test/unit/prof_thread_name.sh5
-rw-r--r--deps/jemalloc/test/unit/psset.c748
-rw-r--r--deps/jemalloc/test/unit/ql.c317
-rw-r--r--deps/jemalloc/test/unit/qr.c243
-rw-r--r--deps/jemalloc/test/unit/rb.c1019
-rw-r--r--deps/jemalloc/test/unit/retained.c188
-rw-r--r--deps/jemalloc/test/unit/rtree.c289
-rw-r--r--deps/jemalloc/test/unit/safety_check.c163
-rw-r--r--deps/jemalloc/test/unit/safety_check.sh5
-rw-r--r--deps/jemalloc/test/unit/san.c207
-rw-r--r--deps/jemalloc/test/unit/san.sh3
-rw-r--r--deps/jemalloc/test/unit/san_bump.c111
-rw-r--r--deps/jemalloc/test/unit/sc.c33
-rw-r--r--deps/jemalloc/test/unit/sec.c634
-rw-r--r--deps/jemalloc/test/unit/seq.c95
-rw-r--r--deps/jemalloc/test/unit/size_check.c79
-rw-r--r--deps/jemalloc/test/unit/size_check.sh5
-rw-r--r--deps/jemalloc/test/unit/size_classes.c188
-rw-r--r--deps/jemalloc/test/unit/slab.c39
-rw-r--r--deps/jemalloc/test/unit/smoothstep.c102
-rw-r--r--deps/jemalloc/test/unit/spin.c18
-rw-r--r--deps/jemalloc/test/unit/stats.c431
-rw-r--r--deps/jemalloc/test/unit/stats_print.c999
-rw-r--r--deps/jemalloc/test/unit/sz.c66
-rw-r--r--deps/jemalloc/test/unit/tcache_max.c175
-rw-r--r--deps/jemalloc/test/unit/tcache_max.sh3
-rw-r--r--deps/jemalloc/test/unit/test_hooks.c38
-rw-r--r--deps/jemalloc/test/unit/thread_event.c34
-rw-r--r--deps/jemalloc/test/unit/thread_event.sh5
-rw-r--r--deps/jemalloc/test/unit/ticker.c100
-rw-r--r--deps/jemalloc/test/unit/tsd.c274
-rw-r--r--deps/jemalloc/test/unit/uaf.c262
-rw-r--r--deps/jemalloc/test/unit/witness.c280
-rw-r--r--deps/jemalloc/test/unit/zero.c59
-rw-r--r--deps/jemalloc/test/unit/zero.sh5
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_abort.c26
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_abort.sh3
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_alloc.c48
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_alloc.sh3
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_free.c33
-rw-r--r--deps/jemalloc/test/unit/zero_realloc_free.sh3
-rw-r--r--deps/jemalloc/test/unit/zero_reallocs.c40
-rw-r--r--deps/jemalloc/test/unit/zero_reallocs.sh3
209 files changed, 31692 insertions, 0 deletions
diff --git a/deps/jemalloc/test/analyze/prof_bias.c b/deps/jemalloc/test/analyze/prof_bias.c
new file mode 100644
index 0000000..a96ca94
--- /dev/null
+++ b/deps/jemalloc/test/analyze/prof_bias.c
@@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * This is a helper utility, only meant to be run manually (and, for example,
+ * doesn't check for failures, try to skip execution in non-prof modes, etc.).
+ * It runs, allocates objects of two different sizes from the same stack trace,
+ * and exits.
+ *
+ * The idea is that some human operator will run it like:
+ * MALLOC_CONF="prof:true,prof_final:true" test/analyze/prof_bias
+ * and manually inspect the results.
+ *
+ * The results should be:
+ * jeprof --text test/analyze/prof_bias --inuse_space jeprof.<pid>.0.f.heap:
+ * around 1024 MB
+ * jeprof --text test/analyze/prof_bias --inuse_objects jeprof.<pid>.0.f.heap:
+ * around 33554448 = 16 + 32 * 1024 * 1024
+ *
+ * And, if prof_accum is on:
+ * jeprof --text test/analyze/prof_bias --alloc_space jeprof.<pid>.0.f.heap:
+ * around 2048 MB
+ * jeprof --text test/analyze/prof_bias --alloc_objects jeprof.<pid>.0.f.heap:
+ * around 67108896 = 2 * (16 + 32 * 1024 * 1024)
+ */
+
+static void
+mock_backtrace(void **vec, unsigned *len, unsigned max_len) {
+ *len = 4;
+ vec[0] = (void *)0x111;
+ vec[1] = (void *)0x222;
+ vec[2] = (void *)0x333;
+ vec[3] = (void *)0x444;
+}
+
+static void
+do_allocs(size_t sz, size_t cnt, bool do_frees) {
+ for (size_t i = 0; i < cnt; i++) {
+ void *ptr = mallocx(sz, 0);
+ assert_ptr_not_null(ptr, "Unexpected mallocx failure");
+ if (do_frees) {
+ dallocx(ptr, 0);
+ }
+ }
+}
+
+int
+main(void) {
+ size_t lg_prof_sample_local = 19;
+ int err = mallctl("prof.reset", NULL, NULL,
+ (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local));
+ assert(err == 0);
+
+ prof_backtrace_hook_set(mock_backtrace);
+ do_allocs(16, 32 * 1024 * 1024, /* do_frees */ true);
+ do_allocs(32 * 1024 * 1024, 16, /* do_frees */ true);
+ do_allocs(16, 32 * 1024 * 1024, /* do_frees */ false);
+ do_allocs(32 * 1024 * 1024, 16, /* do_frees */ false);
+
+ return 0;
+}
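
The expected figures quoted in the header comment of prof_bias.c follow directly from the four do_allocs() calls: two passes, each allocating 2^25 sixteen-byte objects plus sixteen 32 MiB objects, with the first pass freeing everything and the second keeping it live. As a rough back-of-the-envelope check (assuming all allocations are attributed to the single mocked stack trace):

\[
\begin{aligned}
\text{inuse\_space} &\approx 2^{25} \cdot 16\,\mathrm{B} + 16 \cdot 32\,\mathrm{MiB} = 512\,\mathrm{MiB} + 512\,\mathrm{MiB} = 1024\,\mathrm{MiB},\\
\text{inuse\_objects} &\approx 2^{25} + 16 = 33{,}554{,}448.
\end{aligned}
\]

With prof_accum on, the freed pass is counted as well, doubling both figures: alloc_space is around 2048 MiB and alloc_objects around 67,108,896, matching the comment.
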
diff --git a/deps/jemalloc/test/analyze/rand.c b/deps/jemalloc/test/analyze/rand.c
new file mode 100644
index 0000000..bb20b06
--- /dev/null
+++ b/deps/jemalloc/test/analyze/rand.c
@@ -0,0 +1,276 @@
+#include "test/jemalloc_test.h"
+
+/******************************************************************************/
+
+/*
+ * General purpose tool for examining random number distributions.
+ *
+ * Input -
+ * (a) a random number generator, and
+ * (b) the buckets:
+ * (1) number of buckets,
+ * (2) width of each bucket, in log scale,
+ * (3) expected mean and stddev of the count of random numbers in each
+ * bucket, and
+ * (c) number of iterations to invoke the generator.
+ *
+ * The program generates the specified number of random numbers and assesses how
+ * well they conform to the expectations: for each bucket, it outputs -
+ * (a) the (given) expected mean and stddev,
+ * (b) the actual count and any interesting level of deviation:
+ * (1) ~68% buckets should show no interesting deviation, meaning a
+ * deviation less than stddev from the expectation;
+ * (2) ~27% buckets should show '+' / '-', meaning a deviation in the range
+ * of [stddev, 2 * stddev) from the expectation;
+ * (3) ~4% buckets should show '++' / '--', meaning a deviation in the
+ * range of [2 * stddev, 3 * stddev) from the expectation; and
+ * (4) less than 0.3% buckets should show more than two '+'s / '-'s.
+ *
+ * Technical remarks:
+ * (a) The generator is expected to output uint64_t numbers, so you might need
+ * to define a wrapper.
+ * (b) The buckets must be of equal width, with the lowest bucket covering
+ * [0, 2^lg_bucket_width).
+ * (c) Any generated number >= n_bucket * 2^lg_bucket_width will be counted
+ * towards the last bucket; the expected mean and stddev provided should
+ * also reflect that.
+ * (d) The number of iterations should be chosen so that the bucket with the
+ * minimal expected proportion gets a sufficient count.
+ */
+
+static void
+fill(size_t a[], const size_t n, const size_t k) {
+ for (size_t i = 0; i < n; ++i) {
+ a[i] = k;
+ }
+}
+
+static void
+collect_buckets(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
+ const size_t n_bucket, const size_t lg_bucket_width, const size_t n_iter) {
+ for (size_t i = 0; i < n_iter; ++i) {
+ uint64_t num = gen(opaque);
+ uint64_t bucket_id = num >> lg_bucket_width;
+ if (bucket_id >= n_bucket) {
+ bucket_id = n_bucket - 1;
+ }
+ ++buckets[bucket_id];
+ }
+}
+
+static void
+print_buckets(const size_t buckets[], const size_t means[],
+ const size_t stddevs[], const size_t n_bucket) {
+ for (size_t i = 0; i < n_bucket; ++i) {
+ malloc_printf("%zu:\tmean = %zu,\tstddev = %zu,\tbucket = %zu",
+ i, means[i], stddevs[i], buckets[i]);
+
+ /* Make sure there's no overflow. */
+ assert(buckets[i] + stddevs[i] >= stddevs[i]);
+ assert(means[i] + stddevs[i] >= stddevs[i]);
+
+ if (buckets[i] + stddevs[i] <= means[i]) {
+ malloc_write(" ");
+ for (size_t t = means[i] - buckets[i]; t >= stddevs[i];
+ t -= stddevs[i]) {
+ malloc_write("-");
+ }
+ } else if (buckets[i] >= means[i] + stddevs[i]) {
+ malloc_write(" ");
+ for (size_t t = buckets[i] - means[i]; t >= stddevs[i];
+ t -= stddevs[i]) {
+ malloc_write("+");
+ }
+ }
+ malloc_write("\n");
+ }
+}
+
+static void
+bucket_analysis(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
+ const size_t means[], const size_t stddevs[], const size_t n_bucket,
+ const size_t lg_bucket_width, const size_t n_iter) {
+ for (size_t i = 1; i <= 3; ++i) {
+ malloc_printf("round %zu\n", i);
+ fill(buckets, n_bucket, 0);
+ collect_buckets(gen, opaque, buckets, n_bucket,
+ lg_bucket_width, n_iter);
+ print_buckets(buckets, means, stddevs, n_bucket);
+ }
+}
+
+/* (Recommended) minimal bucket mean. */
+#define MIN_BUCKET_MEAN 10000
+
+/******************************************************************************/
+
+/* Uniform random number generator. */
+
+typedef struct uniform_gen_arg_s uniform_gen_arg_t;
+struct uniform_gen_arg_s {
+ uint64_t state;
+ const unsigned lg_range;
+};
+
+static uint64_t
+uniform_gen(void *opaque) {
+ uniform_gen_arg_t *arg = (uniform_gen_arg_t *)opaque;
+ return prng_lg_range_u64(&arg->state, arg->lg_range);
+}
+
+TEST_BEGIN(test_uniform) {
+#define LG_N_BUCKET 5
+#define N_BUCKET (1 << LG_N_BUCKET)
+
+#define QUOTIENT_CEIL(n, d) (((n) - 1) / (d) + 1)
+
+ const unsigned lg_range_test = 25;
+
+ /*
+ * Mathematical tricks to guarantee that both mean and stddev are
+ * integers, and that the minimal bucket mean is at least
+ * MIN_BUCKET_MEAN.
+ */
+ const size_t q = 1 << QUOTIENT_CEIL(LG_CEIL(QUOTIENT_CEIL(
+ MIN_BUCKET_MEAN, N_BUCKET * (N_BUCKET - 1))), 2);
+ const size_t stddev = (N_BUCKET - 1) * q;
+ const size_t mean = N_BUCKET * stddev * q;
+ const size_t n_iter = N_BUCKET * mean;
+
+ size_t means[N_BUCKET];
+ fill(means, N_BUCKET, mean);
+ size_t stddevs[N_BUCKET];
+ fill(stddevs, N_BUCKET, stddev);
+
+ uniform_gen_arg_t arg = {(uint64_t)(uintptr_t)&lg_range_test,
+ lg_range_test};
+ size_t buckets[N_BUCKET];
+ assert_zu_ge(lg_range_test, LG_N_BUCKET, "");
+ const size_t lg_bucket_width = lg_range_test - LG_N_BUCKET;
+
+ bucket_analysis(uniform_gen, &arg, buckets, means, stddevs,
+ N_BUCKET, lg_bucket_width, n_iter);
+
+#undef LG_N_BUCKET
+#undef N_BUCKET
+#undef QUOTIENT_CEIL
+}
+TEST_END
+
+/******************************************************************************/
+
+/* Geometric random number generator; compiled only when prof is on. */
+
+#ifdef JEMALLOC_PROF
+
+/*
+ * Fills geometric proportions and returns the minimal proportion. See
+ * comments in test_prof_sample for explanations for n_divide.
+ */
+static double
+fill_geometric_proportions(double proportions[], const size_t n_bucket,
+ const size_t n_divide) {
+ assert(n_bucket > 0);
+ assert(n_divide > 0);
+ double x = 1.;
+ for (size_t i = 0; i < n_bucket; ++i) {
+ if (i == n_bucket - 1) {
+ proportions[i] = x;
+ } else {
+ double y = x * exp(-1. / n_divide);
+ proportions[i] = x - y;
+ x = y;
+ }
+ }
+ /*
+ * The minimal proportion is the smaller one of the last two
+ * proportions for geometric distribution.
+ */
+ double min_proportion = proportions[n_bucket - 1];
+ if (n_bucket >= 2 && proportions[n_bucket - 2] < min_proportion) {
+ min_proportion = proportions[n_bucket - 2];
+ }
+ return min_proportion;
+}
+
+static size_t
+round_to_nearest(const double x) {
+ return (size_t)(x + .5);
+}
+
+static void
+fill_references(size_t means[], size_t stddevs[], const double proportions[],
+ const size_t n_bucket, const size_t n_iter) {
+ for (size_t i = 0; i < n_bucket; ++i) {
+ double x = n_iter * proportions[i];
+ means[i] = round_to_nearest(x);
+ stddevs[i] = round_to_nearest(sqrt(x * (1. - proportions[i])));
+ }
+}
+
+static uint64_t
+prof_sample_gen(void *opaque) {
+ return prof_sample_new_event_wait((tsd_t *)opaque) - 1;
+}
+
+#endif /* JEMALLOC_PROF */
+
+TEST_BEGIN(test_prof_sample) {
+ test_skip_if(!config_prof);
+#ifdef JEMALLOC_PROF
+
+/* Number of divisions within [0, mean). */
+#define LG_N_DIVIDE 3
+#define N_DIVIDE (1 << LG_N_DIVIDE)
+
+/* Coverage of buckets in terms of multiples of mean. */
+#define LG_N_MULTIPLY 2
+#define N_GEO_BUCKET (N_DIVIDE << LG_N_MULTIPLY)
+
+ test_skip_if(!opt_prof);
+
+ size_t lg_prof_sample_test = 25;
+
+ size_t lg_prof_sample_orig = lg_prof_sample;
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_test,
+ sizeof(size_t)), 0, "");
+ malloc_printf("lg_prof_sample = %zu\n", lg_prof_sample_test);
+
+ double proportions[N_GEO_BUCKET + 1];
+ const double min_proportion = fill_geometric_proportions(proportions,
+ N_GEO_BUCKET + 1, N_DIVIDE);
+ const size_t n_iter = round_to_nearest(MIN_BUCKET_MEAN /
+ min_proportion);
+ size_t means[N_GEO_BUCKET + 1];
+ size_t stddevs[N_GEO_BUCKET + 1];
+ fill_references(means, stddevs, proportions, N_GEO_BUCKET + 1, n_iter);
+
+ tsd_t *tsd = tsd_fetch();
+ assert_ptr_not_null(tsd, "");
+ size_t buckets[N_GEO_BUCKET + 1];
+ assert_zu_ge(lg_prof_sample, LG_N_DIVIDE, "");
+ const size_t lg_bucket_width = lg_prof_sample - LG_N_DIVIDE;
+
+ bucket_analysis(prof_sample_gen, tsd, buckets, means, stddevs,
+ N_GEO_BUCKET + 1, lg_bucket_width, n_iter);
+
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_orig,
+ sizeof(size_t)), 0, "");
+
+#undef LG_N_DIVIDE
+#undef N_DIVIDE
+#undef LG_N_MULTIPLY
+#undef N_GEO_BUCKET
+
+#endif /* JEMALLOC_PROF */
+}
+TEST_END
+
+/******************************************************************************/
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_uniform,
+ test_prof_sample);
+}
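
The "mathematical tricks" comment in test_uniform above is easier to follow with the constants plugged in. Taking MIN_BUCKET_MEAN = 10000 and N = N_BUCKET = 32 from the code, a reader-side reconstruction of the arithmetic gives

\[
q = 2^{\left\lceil \frac{1}{2}\left\lceil \log_2 \left\lceil \tfrac{10000}{32 \cdot 31} \right\rceil \right\rceil \right\rceil} = 2^{\lceil 4/2 \rceil} = 4,
\qquad \sigma = (N-1)q = 124,
\qquad \mu = N \sigma q = 15872,
\qquad n_{\mathrm{iter}} = N \mu = 507904.
\]

Each bucket count is then Binomial(n_iter, 1/N), whose mean n_iter/N = N(N-1)q^2 = mu and variance n_iter * (1/N)(1 - 1/N) = ((N-1)q)^2 = sigma^2 are exact integers by construction, with mu >= MIN_BUCKET_MEAN as required.
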
diff --git a/deps/jemalloc/test/analyze/sizes.c b/deps/jemalloc/test/analyze/sizes.c
new file mode 100644
index 0000000..44c9de5
--- /dev/null
+++ b/deps/jemalloc/test/analyze/sizes.c
@@ -0,0 +1,53 @@
+#include "test/jemalloc_test.h"
+
+#include <stdio.h>
+
+/*
+ * Print the sizes of various important core data structures. OK, I guess this
+ * isn't really a "stress" test, but it does give useful information about
+ * low-level performance characteristics, as the other things in this directory
+ * do.
+ */
+
+static void
+do_print(const char *name, size_t sz_bytes) {
+ const char *sizes[] = {"bytes", "KB", "MB", "GB", "TB", "PB", "EB",
+ "ZB"};
+ size_t sizes_max = sizeof(sizes)/sizeof(sizes[0]);
+
+ size_t ind = 0;
+ double sz = sz_bytes;
+ while (sz >= 1024 && ind < sizes_max - 1) {
+ sz /= 1024;
+ ind++;
+ }
+ if (ind == 0) {
+ printf("%-20s: %zu bytes\n", name, sz_bytes);
+ } else {
+ printf("%-20s: %f %s\n", name, sz, sizes[ind]);
+ }
+}
+
+int
+main() {
+#define P(type) \
+ do_print(#type, sizeof(type))
+ P(arena_t);
+ P(arena_stats_t);
+ P(base_t);
+ P(decay_t);
+ P(edata_t);
+ P(ecache_t);
+ P(eset_t);
+ P(malloc_mutex_t);
+ P(prof_tctx_t);
+ P(prof_gctx_t);
+ P(prof_tdata_t);
+ P(rtree_t);
+ P(rtree_leaf_elm_t);
+ P(slab_data_t);
+ P(tcache_t);
+ P(tcache_slow_t);
+ P(tsd_t);
+#undef P
+}
diff --git a/deps/jemalloc/test/include/test/SFMT-alti.h b/deps/jemalloc/test/include/test/SFMT-alti.h
new file mode 100644
index 0000000..a1885db
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-alti.h
@@ -0,0 +1,186 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE
+vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d) {
+
+ const vector unsigned int sl1 = ALTI_SL1;
+ const vector unsigned int sr1 = ALTI_SR1;
+#ifdef ONLY64
+ const vector unsigned int mask = ALTI_MSK64;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+ const vector unsigned int mask = ALTI_MSK;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+ vector unsigned int v, w, x, y, z;
+ x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+ v = a;
+ y = vec_sr(b, sr1);
+ z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+ w = vec_sl(d, sl1);
+ z = vec_xor(z, w);
+ y = vec_and(y, mask);
+ v = vec_xor(v, x);
+ z = vec_xor(z, y);
+ z = vec_xor(z, v);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j].s = array[j + size - N].s;
+ }
+ for (; i < size; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ ctx->sfmt[j++].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define ALTI_SWAP (vector unsigned char) \
+ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps high and low 32-bit of 64-bit integers in user
+ * specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size size of 128-bit array.
+ */
+static inline void swap(w128_t *array, int size) {
+ int i;
+ const vector unsigned char perm = ALTI_SWAP;
+
+ for (i = 0; i < size; i++) {
+ array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+ }
+}
+#endif
+
+#endif
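
For readers cross-checking this AltiVec kernel against the scalar and SSE2 variants elsewhere in the tree: vec_recursion() implements the usual SFMT update, where the vec_perm calls realize whole-128-bit byte shifts and SL1/SR1 are per-32-bit-lane shifts. In formula form (a paraphrase of the code above, not new behavior):

\[
w_{i+N} = w_i \oplus (w_i \ll_{128} 8\,\mathrm{SL2}) \oplus ((w_{i+\mathrm{POS1}} \gg \mathrm{SR1}) \wedge \mathrm{MSK}) \oplus (w_{i+N-2} \gg_{128} 8\,\mathrm{SR2}) \oplus (w_{i+N-1} \ll \mathrm{SL1}),
\]

where the 128-subscripted shifts move the full 128-bit word by SL2 and SR2 bytes, and the remaining shifts are applied independently to each 32-bit lane.
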
diff --git a/deps/jemalloc/test/include/test/SFMT-params.h b/deps/jemalloc/test/include/test/SFMT-params.h
new file mode 100644
index 0000000..ade6622
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params.h
@@ -0,0 +1,132 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS_H
+#define SFMT_PARAMS_H
+
+#if !defined(MEXP)
+#ifdef __GNUC__
+ #warning "MEXP is not defined. I assume MEXP is 19937."
+#endif
+ #define MEXP 19937
+#endif
+/*-----------------
+ BASIC DEFINITIONS
+ -----------------*/
+/** Mersenne Exponent. The period of the sequence
+ * is a multiple of 2^MEXP-1.
+ * #define MEXP 19937 */
+/** SFMT generator has an internal state array of 128-bit integers,
+ * and N is its size. */
+#define N (MEXP / 128 + 1)
+/** N32 is the size of internal state array when regarded as an array
+ * of 32-bit integers.*/
+#define N32 (N * 4)
+/** N64 is the size of internal state array when regarded as an array
+ * of 64-bit integers.*/
+#define N64 (N * 2)
+
+/*----------------------
+ the parameters of SFMT
+ The following definitions are provided by the SFMT-paramsXXXX.h files.
+ ----------------------*/
+/** the pick up position of the array.
+#define POS1 122
+*/
+
+/** the parameter of shift left as four 32-bit registers.
+#define SL1 18
+ */
+
+/** the parameter of shift left as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SL2 1
+*/
+
+/** the parameter of shift right as four 32-bit registers.
+#define SR1 11
+*/
+
+/** the parameter of shift right as one 128-bit register.
+ * The 128-bit integer is shifted by (SR2 * 8) bits.
+#define SR2 1
+*/
+
+/** A bitmask, used in the recursion. These parameters are introduced
+ * to break symmetry of SIMD.
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+*/
+
+/** These definitions are part of a 128-bit period certification vector.
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xc98e126aU
+*/
+
+#if MEXP == 607
+ #include "test/SFMT-params607.h"
+#elif MEXP == 1279
+ #include "test/SFMT-params1279.h"
+#elif MEXP == 2281
+ #include "test/SFMT-params2281.h"
+#elif MEXP == 4253
+ #include "test/SFMT-params4253.h"
+#elif MEXP == 11213
+ #include "test/SFMT-params11213.h"
+#elif MEXP == 19937
+ #include "test/SFMT-params19937.h"
+#elif MEXP == 44497
+ #include "test/SFMT-params44497.h"
+#elif MEXP == 86243
+ #include "test/SFMT-params86243.h"
+#elif MEXP == 132049
+ #include "test/SFMT-params132049.h"
+#elif MEXP == 216091
+ #include "test/SFMT-params216091.h"
+#else
+#ifdef __GNUC__
+ #error "MEXP is not valid."
+ #undef MEXP
+#else
+ #undef MEXP
+#endif
+
+#endif
+
+#endif /* SFMT_PARAMS_H */
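
With the default MEXP of 19937, the state-size definitions above evaluate to (a quick worked example):

\[
N = \lfloor 19937/128 \rfloor + 1 = 156, \qquad N32 = 4N = 624, \qquad N64 = 2N = 312,
\]

and the period of the generated sequence is a multiple of 2^19937 - 1.
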
diff --git a/deps/jemalloc/test/include/test/SFMT-params11213.h b/deps/jemalloc/test/include/test/SFMT-params11213.h
new file mode 100644
index 0000000..2994bd2
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params11213.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS11213_H
+#define SFMT_PARAMS11213_H
+
+#define POS1 68
+#define SL1 14
+#define SL2 3
+#define SR1 7
+#define SR2 3
+#define MSK1 0xeffff7fbU
+#define MSK2 0xffffffefU
+#define MSK3 0xdfdfbfffU
+#define MSK4 0x7fffdbfdU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xe8148000U
+#define PARITY4 0xd0c7afa3U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
+
+#endif /* SFMT_PARAMS11213_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params1279.h b/deps/jemalloc/test/include/test/SFMT-params1279.h
new file mode 100644
index 0000000..d7959f9
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params1279.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS1279_H
+#define SFMT_PARAMS1279_H
+
+#define POS1 7
+#define SL1 14
+#define SL2 3
+#define SR1 5
+#define SR2 1
+#define MSK1 0xf7fefffdU
+#define MSK2 0x7fefcfffU
+#define MSK3 0xaff3ef3fU
+#define MSK4 0xb5ffff7fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x20000000U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
+
+#endif /* SFMT_PARAMS1279_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params132049.h b/deps/jemalloc/test/include/test/SFMT-params132049.h
new file mode 100644
index 0000000..a1dcec3
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params132049.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS132049_H
+#define SFMT_PARAMS132049_H
+
+#define POS1 110
+#define SL1 19
+#define SL2 1
+#define SR1 21
+#define SR2 1
+#define MSK1 0xffffbb5fU
+#define MSK2 0xfb6ebf95U
+#define MSK3 0xfffefffaU
+#define MSK4 0xcff77fffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xcb520000U
+#define PARITY4 0xc7e91c7dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
+
+#endif /* SFMT_PARAMS132049_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params19937.h b/deps/jemalloc/test/include/test/SFMT-params19937.h
new file mode 100644
index 0000000..fb92b4c
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params19937.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS19937_H
+#define SFMT_PARAMS19937_H
+
+#define POS1 122
+#define SL1 18
+#define SL2 1
+#define SR1 11
+#define SR2 1
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x13c9e684U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
+
+#endif /* SFMT_PARAMS19937_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params216091.h b/deps/jemalloc/test/include/test/SFMT-params216091.h
new file mode 100644
index 0000000..125ce28
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params216091.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS216091_H
+#define SFMT_PARAMS216091_H
+
+#define POS1 627
+#define SL1 11
+#define SL2 3
+#define SR1 10
+#define SR2 1
+#define MSK1 0xbff7bff7U
+#define MSK2 0xbfffffffU
+#define MSK3 0xbffffa7fU
+#define MSK4 0xffddfbfbU
+#define PARITY1 0xf8000001U
+#define PARITY2 0x89e80709U
+#define PARITY3 0x3bd2b64bU
+#define PARITY4 0x0c64b1e4U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
+
+#endif /* SFMT_PARAMS216091_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params2281.h b/deps/jemalloc/test/include/test/SFMT-params2281.h
new file mode 100644
index 0000000..0ef85c4
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params2281.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS2281_H
+#define SFMT_PARAMS2281_H
+
+#define POS1 12
+#define SL1 19
+#define SL2 1
+#define SR1 5
+#define SR2 1
+#define MSK1 0xbff7ffbfU
+#define MSK2 0xfdfffffeU
+#define MSK3 0xf7ffef7fU
+#define MSK4 0xf2f7cbbfU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x41dfa600U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
+
+#endif /* SFMT_PARAMS2281_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params4253.h b/deps/jemalloc/test/include/test/SFMT-params4253.h
new file mode 100644
index 0000000..9f07bc6
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params4253.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS4253_H
+#define SFMT_PARAMS4253_H
+
+#define POS1 17
+#define SL1 20
+#define SL2 1
+#define SR1 7
+#define SR2 1
+#define MSK1 0x9f7bffffU
+#define MSK2 0x9fffff5fU
+#define MSK3 0x3efffffbU
+#define MSK4 0xfffff7bbU
+#define PARITY1 0xa8000001U
+#define PARITY2 0xaf5390a3U
+#define PARITY3 0xb740b3f8U
+#define PARITY4 0x6c11486dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
+
+#endif /* SFMT_PARAMS4253_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params44497.h b/deps/jemalloc/test/include/test/SFMT-params44497.h
new file mode 100644
index 0000000..85598fe
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params44497.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS44497_H
+#define SFMT_PARAMS44497_H
+
+#define POS1 330
+#define SL1 5
+#define SL2 3
+#define SR1 9
+#define SR2 3
+#define MSK1 0xeffffffbU
+#define MSK2 0xdfbebfffU
+#define MSK3 0xbfbf7befU
+#define MSK4 0x9ffd7bffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xa3ac4000U
+#define PARITY4 0xecc1327aU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
+
+#endif /* SFMT_PARAMS44497_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params607.h b/deps/jemalloc/test/include/test/SFMT-params607.h
new file mode 100644
index 0000000..bc76485
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params607.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS607_H
+#define SFMT_PARAMS607_H
+
+#define POS1 2
+#define SL1 15
+#define SL2 3
+#define SR1 13
+#define SR2 3
+#define MSK1 0xfdff37ffU
+#define MSK2 0xef7f3f7dU
+#define MSK3 0xff777b7dU
+#define MSK4 0x7ff7fb2fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x5986f054U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
+
+#endif /* SFMT_PARAMS607_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params86243.h b/deps/jemalloc/test/include/test/SFMT-params86243.h
new file mode 100644
index 0000000..5e4d783
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params86243.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS86243_H
+#define SFMT_PARAMS86243_H
+
+#define POS1 366
+#define SL1 6
+#define SL2 7
+#define SR1 19
+#define SR2 1
+#define MSK1 0xfdbffbffU
+#define MSK2 0xbff7ff3fU
+#define MSK3 0xfd77efffU
+#define MSK4 0xbf9ff3ffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xe9528d85U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
+ #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
+
+#endif /* SFMT_PARAMS86243_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-sse2.h b/deps/jemalloc/test/include/test/SFMT-sse2.h
new file mode 100644
index 0000000..169ad55
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-sse2.h
@@ -0,0 +1,157 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-sse2.h
+ * @brief SIMD oriented Fast Mersenne Twister (SFMT) for Intel SSE2
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * @note We assume LITTLE ENDIAN in this file
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+
+#ifndef SFMT_SSE2_H
+#define SFMT_SSE2_H
+
+/**
+ * This function represents the recursion formula.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @param mask 128-bit mask
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
+ __m128i c, __m128i d, __m128i mask) {
+ __m128i v, x, y, z;
+
+ x = _mm_load_si128(a);
+ y = _mm_srli_epi32(*b, SR1);
+ z = _mm_srli_si128(c, SR2);
+ v = _mm_slli_epi32(d, SL1);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, v);
+ x = _mm_slli_si128(x, SL2);
+ y = _mm_and_si128(y, mask);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, y);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ r = _mm_load_si128(&array[j + size - N].si);
+ _mm_store_si128(&ctx->sfmt[j].si, r);
+ }
+ for (; i < size; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ _mm_store_si128(&ctx->sfmt[j++].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#endif
diff --git a/deps/jemalloc/test/include/test/SFMT.h b/deps/jemalloc/test/include/test/SFMT.h
new file mode 100644
index 0000000..863fc55
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT.h
@@ -0,0 +1,146 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister (SFMT) pseudorandom
+ * number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ *
+ * @note We assume that your system has inttypes.h. If your system
+ * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
+ * and you have to define PRIu64 and PRIx64 in this file as follows:
+ * @verbatim
+ typedef unsigned int uint32_t;
+ typedef unsigned long long uint64_t;
+ #define PRIu64 "llu"
+ #define PRIx64 "llx"
+@endverbatim
+ * uint32_t must be an unsigned integer type of exactly 32 bits (no more,
+ * no less), and uint64_t must be an unsigned integer type of exactly 64
+ * bits. PRIu64 and PRIx64 are the printf() conversion specifiers used to
+ * print a 64-bit unsigned integer in decimal and hexadecimal format,
+ * respectively.
+ */
+
+#ifndef SFMT_H
+#define SFMT_H
+
+typedef struct sfmt_s sfmt_t;
+
+uint32_t gen_rand32(sfmt_t *ctx);
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
+uint64_t gen_rand64(sfmt_t *ctx);
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
+sfmt_t *init_gen_rand(uint32_t seed);
+sfmt_t *init_by_array(uint32_t *init_key, int key_length);
+void fini_gen_rand(sfmt_t *ctx);
+const char *get_idstring(void);
+int get_min_array_size32(void);
+int get_min_array_size64(void);
+
+/* These real versions are due to Isaku Wada */
+/** generates a random number on [0,1]-real-interval */
+static inline double to_real1(uint32_t v) {
+ return v * (1.0/4294967295.0);
+ /* divided by 2^32-1 */
+}
+
+/** generates a random number on [0,1]-real-interval */
+static inline double genrand_real1(sfmt_t *ctx) {
+ return to_real1(gen_rand32(ctx));
+}
+
+/** generates a random number on [0,1)-real-interval */
+static inline double to_real2(uint32_t v) {
+ return v * (1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on [0,1)-real-interval */
+static inline double genrand_real2(sfmt_t *ctx) {
+ return to_real2(gen_rand32(ctx));
+}
+
+/** generates a random number on (0,1)-real-interval */
+static inline double to_real3(uint32_t v) {
+ return (((double)v) + 0.5)*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on (0,1)-real-interval */
+static inline double genrand_real3(sfmt_t *ctx) {
+ return to_real3(gen_rand32(ctx));
+}
+/** These real versions are due to Isaku Wada */
+
+/** generates a random number on [0,1) with 53-bit resolution*/
+static inline double to_res53(uint64_t v) {
+ return v * (1.0/18446744073709551616.0L);
+}
+
+/** generates a random number on [0,1) with 53-bit resolution from two
+ * 32-bit integers */
+static inline double to_res53_mix(uint32_t x, uint32_t y) {
+ return to_res53(x | ((uint64_t)y << 32));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ */
+static inline double genrand_res53(sfmt_t *ctx) {
+ return to_res53(gen_rand64(ctx));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+    using 32-bit integers.
+ */
+static inline double genrand_res53_mix(sfmt_t *ctx) {
+ uint32_t x, y;
+
+ x = gen_rand32(ctx);
+ y = gen_rand32(ctx);
+ return to_res53_mix(x, y);
+}
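+
+/*
+ * Illustrative sketch of typical use: the test harness defines MEXP before
+ * including this header (see jemalloc_test.h.in), then drives the generator
+ * roughly as follows; the seed value here is arbitrary.
+ *
+ *   sfmt_t *ctx = init_gen_rand(12345);
+ *   uint32_t r = gen_rand32_range(ctx, 100); // uniform on [0, 100)
+ *   double d = genrand_real2(ctx);           // uniform on [0, 1)
+ *   fini_gen_rand(ctx);
+ */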
+#endif
diff --git a/deps/jemalloc/test/include/test/arena_util.h b/deps/jemalloc/test/include/test/arena_util.h
new file mode 100644
index 0000000..9a41dac
--- /dev/null
+++ b/deps/jemalloc/test/include/test/arena_util.h
@@ -0,0 +1,155 @@
+static inline unsigned
+do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
+ "Unexpected mallctlbymib() failure");
+
+ expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
+ "Unexpected mallctlbymib() failure");
+
+ return arena_ind;
+}
+
+static inline void
+do_arena_destroy(unsigned arena_ind) {
+ /*
+	 * For convenience, flush the tcache in case there are cached items.
+	 * However, don't assert success, since the tcache may be disabled.
+ */
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static inline void
+do_epoch(void) {
+ uint64_t epoch = 1;
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+}
+
+static inline void
+do_purge(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static inline void
+do_decay(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static inline uint64_t
+get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ uint64_t npurge = 0;
+ size_t sz = sizeof(npurge);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
+ config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
+ return npurge;
+}
+
+static inline uint64_t
+get_arena_dirty_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
+}
+
+static inline uint64_t
+get_arena_dirty_purged(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
+}
+
+static inline uint64_t
+get_arena_muzzy_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
+}
+
+static inline uint64_t
+get_arena_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
+ get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
+}
+
+static inline size_t
+get_arena_pdirty(unsigned arena_ind) {
+ do_epoch();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ size_t pdirty;
+ size_t sz = sizeof(pdirty);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+ return pdirty;
+}
+
+static inline size_t
+get_arena_pmuzzy(unsigned arena_ind) {
+ do_epoch();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ size_t pmuzzy;
+ size_t sz = sizeof(pmuzzy);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+ return pmuzzy;
+}
+
+static inline void *
+do_mallocx(size_t size, int flags) {
+ void *p = mallocx(size, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ return p;
+}
+
+static inline void
+generate_dirty(unsigned arena_ind, size_t size) {
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+ void *p = do_mallocx(size, flags);
+ dallocx(p, flags);
+}
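+
+/*
+ * Illustrative sketch of how a decay/purge test might combine the helpers
+ * above; the decay times and allocation size are arbitrary.
+ *
+ *   unsigned arena_ind = do_arena_create(0, 0);
+ *   generate_dirty(arena_ind, 4096);
+ *   size_t pdirty = get_arena_pdirty(arena_ind);
+ *   do_purge(arena_ind);
+ *   uint64_t npurge = get_arena_npurge(arena_ind);
+ *   do_arena_destroy(arena_ind);
+ */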
+
diff --git a/deps/jemalloc/test/include/test/bench.h b/deps/jemalloc/test/include/test/bench.h
new file mode 100644
index 0000000..0397c94
--- /dev/null
+++ b/deps/jemalloc/test/include/test/bench.h
@@ -0,0 +1,60 @@
+static inline void
+time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
+ void (*func)(void)) {
+ uint64_t i;
+
+ for (i = 0; i < nwarmup; i++) {
+ func();
+ }
+ timer_start(timer);
+ for (i = 0; i < niter; i++) {
+ func();
+ }
+ timer_stop(timer);
+}
+
+#define FMT_NSECS_BUF_SIZE 100
+/* Print nanoseconds / iter into the buffer "buf". */
+static inline void
+fmt_nsecs(uint64_t usec, uint64_t iters, char *buf) {
+ uint64_t nsec = usec * 1000;
+ /* We'll display 3 digits after the decimal point. */
+ uint64_t nsec1000 = nsec * 1000;
+ uint64_t nsecs_per_iter1000 = nsec1000 / iters;
+ uint64_t intpart = nsecs_per_iter1000 / 1000;
+ uint64_t fracpart = nsecs_per_iter1000 % 1000;
+ malloc_snprintf(buf, FMT_NSECS_BUF_SIZE, "%"FMTu64".%03"FMTu64, intpart,
+ fracpart);
+}
+
+static inline void
+compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
+    void (*func_a)(void), const char *name_b, void (*func_b)(void)) {
+ timedelta_t timer_a, timer_b;
+ char ratio_buf[6];
+ void *p;
+
+ p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+
+ time_func(&timer_a, nwarmup, niter, func_a);
+ time_func(&timer_b, nwarmup, niter, func_b);
+
+ uint64_t usec_a = timer_usec(&timer_a);
+ char buf_a[FMT_NSECS_BUF_SIZE];
+ fmt_nsecs(usec_a, niter, buf_a);
+
+ uint64_t usec_b = timer_usec(&timer_b);
+ char buf_b[FMT_NSECS_BUF_SIZE];
+ fmt_nsecs(usec_b, niter, buf_b);
+
+ timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
+ malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us (%s ns/iter), "
+ "%s=%"FMTu64"us (%s ns/iter), ratio=1:%s\n",
+ niter, name_a, usec_a, buf_a, name_b, usec_b, buf_b, ratio_buf);
+
+ dallocx(p, 0);
+}
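+
+/*
+ * Illustrative sketch of typical use: wrap the two operations to compare in
+ * no-argument helpers and hand them to compare_funcs(). The helper names and
+ * iteration counts below are arbitrary.
+ *
+ *   static void time_malloc_free(void) { free(malloc(1)); }
+ *   static void time_mallocx_dallocx(void) { dallocx(mallocx(1, 0), 0); }
+ *
+ *   compare_funcs(10*1000, 10*1000*1000, "malloc_free", time_malloc_free,
+ *       "mallocx_dallocx", time_mallocx_dallocx);
+ */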
diff --git a/deps/jemalloc/test/include/test/bgthd.h b/deps/jemalloc/test/include/test/bgthd.h
new file mode 100644
index 0000000..4fa2395
--- /dev/null
+++ b/deps/jemalloc/test/include/test/bgthd.h
@@ -0,0 +1,17 @@
+/*
+ * Shared utility for checking if background_thread is enabled, which affects
+ * the purging behavior and assumptions in some tests.
+ */
+
+static inline bool
+is_background_thread_enabled(void) {
+ bool enabled;
+ size_t sz = sizeof(bool);
+	int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0);
+ if (ret == ENOENT) {
+ return false;
+ }
+ assert_d_eq(ret, 0, "Unexpected mallctl error");
+
+ return enabled;
+}
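+
+/*
+ * Illustrative sketch: tests whose purging expectations do not hold while the
+ * background thread is active typically guard themselves, e.g.:
+ *
+ *   if (is_background_thread_enabled()) {
+ *       test_skip("Test skipped when background thread is enabled");
+ *   }
+ */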
diff --git a/deps/jemalloc/test/include/test/btalloc.h b/deps/jemalloc/test/include/test/btalloc.h
new file mode 100644
index 0000000..8f34599
--- /dev/null
+++ b/deps/jemalloc/test/include/test/btalloc.h
@@ -0,0 +1,30 @@
+/* btalloc() provides a mechanism for allocating via permuted backtraces. */
+void *btalloc(size_t size, unsigned bits);
+
+#define btalloc_n_proto(n) \
+void *btalloc_##n(size_t size, unsigned bits);
+btalloc_n_proto(0)
+btalloc_n_proto(1)
+
+#define btalloc_n_gen(n) \
+void * \
+btalloc_##n(size_t size, unsigned bits) { \
+ void *p; \
+ \
+ if (bits == 0) { \
+ p = mallocx(size, 0); \
+ } else { \
+ switch (bits & 0x1U) { \
+ case 0: \
+ p = (btalloc_0(size, bits >> 1)); \
+ break; \
+ case 1: \
+ p = (btalloc_1(size, bits >> 1)); \
+ break; \
+ default: not_reached(); \
+ } \
+ } \
+ /* Intentionally sabotage tail call optimization. */ \
+ expect_ptr_not_null(p, "Unexpected mallocx() failure"); \
+ return p; \
+}
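+
+/*
+ * Illustrative sketch: a test source typically instantiates the generators
+ * once (btalloc_n_gen(0) and btalloc_n_gen(1)) and lets btalloc() dispatch to
+ * them, so that each distinct "bits" value produces a distinct backtrace:
+ *
+ *   void *p = btalloc(1, 0x5); // size 1, backtrace selected by bits 0x5
+ *   dallocx(p, 0);
+ */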
diff --git a/deps/jemalloc/test/include/test/extent_hooks.h b/deps/jemalloc/test/include/test/extent_hooks.h
new file mode 100644
index 0000000..aad0a46
--- /dev/null
+++ b/deps/jemalloc/test/include/test/extent_hooks.h
@@ -0,0 +1,289 @@
+/*
+ * Boilerplate code used for testing extent hooks via interception and
+ * passthrough.
+ */
+
+static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit,
+ unsigned arena_ind);
+static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t size_a, size_t size_b, bool committed,
+ unsigned arena_ind);
+static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+
+static extent_hooks_t *default_hooks;
+static extent_hooks_t hooks = {
+ extent_alloc_hook,
+ extent_dalloc_hook,
+ extent_destroy_hook,
+ extent_commit_hook,
+ extent_decommit_hook,
+ extent_purge_lazy_hook,
+ extent_purge_forced_hook,
+ extent_split_hook,
+ extent_merge_hook
+};
+
+/* Control whether hook functions pass calls through to default hooks. */
+static bool try_alloc = true;
+static bool try_dalloc = true;
+static bool try_destroy = true;
+static bool try_commit = true;
+static bool try_decommit = true;
+static bool try_purge_lazy = true;
+static bool try_purge_forced = true;
+static bool try_split = true;
+static bool try_merge = true;
+
+/* Set to false prior to operations, then introspect after operations. */
+static bool called_alloc;
+static bool called_dalloc;
+static bool called_destroy;
+static bool called_commit;
+static bool called_decommit;
+static bool called_purge_lazy;
+static bool called_purge_forced;
+static bool called_split;
+static bool called_merge;
+
+/*
+ * Set to false prior to operations; set to true only when the pass-through
+ * call to the default hook succeeds.
+ */
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_destroy;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge_lazy;
+static bool did_purge_forced;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+# define TRACE_HOOK(fmt, ...)
+#endif
+
+static void *
+extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ void *ret;
+
+ TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, "
+ "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ new_addr, size, alignment, *zero ? "true" : "false", *commit ?
+ "true" : "false", arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
+ "Wrong hook function");
+ called_alloc = true;
+ if (!try_alloc) {
+ return NULL;
+ }
+ ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
+ zero, commit, 0);
+ did_alloc = (ret != NULL);
+ return ret;
+}
+
+static bool
+extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+ "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+ "true" : "false", arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
+ "Wrong hook function");
+ called_dalloc = true;
+ if (!try_dalloc) {
+ return true;
+ }
+ err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
+ did_dalloc = !err;
+ return err;
+}
+
+static void
+extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+ "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+ "true" : "false", arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->destroy, extent_destroy_hook,
+ "Wrong hook function");
+ called_destroy = true;
+ if (!try_destroy) {
+ return;
+ }
+ default_hooks->destroy(default_hooks, addr, size, committed, 0);
+ did_destroy = true;
+}
+
+static bool
+extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->commit, extent_commit_hook,
+ "Wrong hook function");
+ called_commit = true;
+ if (!try_commit) {
+ return true;
+ }
+ err = default_hooks->commit(default_hooks, addr, size, offset, length,
+ 0);
+ did_commit = !err;
+ return err;
+}
+
+static bool
+extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
+ "Wrong hook function");
+ called_decommit = true;
+ if (!try_decommit) {
+ return true;
+ }
+ err = default_hooks->decommit(default_hooks, addr, size, offset, length,
+ 0);
+ did_decommit = !err;
+ return err;
+}
+
+static bool
+extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
+ "Wrong hook function");
+ called_purge_lazy = true;
+ if (!try_purge_lazy) {
+ return true;
+ }
+ err = default_hooks->purge_lazy == NULL ||
+ default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
+ 0);
+ did_purge_lazy = !err;
+ return err;
+}
+
+static bool
+extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
+ "Wrong hook function");
+ called_purge_forced = true;
+ if (!try_purge_forced) {
+ return true;
+ }
+ err = default_hooks->purge_forced == NULL ||
+ default_hooks->purge_forced(default_hooks, addr, size, offset,
+ length, 0);
+ did_purge_forced = !err;
+ return err;
+}
+
+static bool
+extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, "
+ "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ addr, size, size_a, size_b, committed ? "true" : "false",
+ arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->split, extent_split_hook,
+ "Wrong hook function");
+ called_split = true;
+ if (!try_split) {
+ return true;
+ }
+ err = (default_hooks->split == NULL ||
+ default_hooks->split(default_hooks, addr, size, size_a, size_b,
+ committed, 0));
+ did_split = !err;
+ return err;
+}
+
+static bool
+extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p "
+ "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ addr_a, size_a, addr_b, size_b, committed ? "true" : "false",
+ arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->merge, extent_merge_hook,
+ "Wrong hook function");
+ expect_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
+ "Extents not mergeable");
+ called_merge = true;
+ if (!try_merge) {
+ return true;
+ }
+ err = (default_hooks->merge == NULL ||
+ default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
+ committed, 0));
+ did_merge = !err;
+ return err;
+}
+
+static void
+extent_hooks_prep(void) {
+ size_t sz;
+
+ sz = sizeof(default_hooks);
+ expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
+ NULL, 0), 0, "Unexpected mallctl() error");
+}
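+
+/*
+ * Illustrative sketch of typical use: capture the default hooks, install the
+ * interception hooks on an arena via mallctl, then exercise the arena and
+ * inspect the called_ / did_ flags. "sz" below is a placeholder for an
+ * allocation size large enough to force new extent activity.
+ *
+ *   extent_hooks_prep();
+ *   extent_hooks_t *new_hooks = &hooks;
+ *   expect_d_eq(mallctl("arena.0.extent_hooks", NULL, NULL,
+ *       (void *)&new_hooks, sizeof(new_hooks)), 0,
+ *       "Unexpected mallctl() error");
+ *   called_alloc = false;
+ *   void *p = mallocx(sz, MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ *   expect_true(called_alloc, "Expected alloc hook to be invoked");
+ *   dallocx(p, MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ */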
diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in
new file mode 100644
index 0000000..3f8c0da
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in
@@ -0,0 +1,180 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+#include <string.h>
+#ifdef _WIN32
+# include "msvc_compat/strings.h"
+#endif
+
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+#else
+# include <pthread.h>
+#endif
+
+#include "test/jemalloc_test_defs.h"
+
+#if defined(JEMALLOC_OSATOMIC)
+# include <libkern/OSAtomic.h>
+#endif
+
+#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
+# include <altivec.h>
+#endif
+#ifdef HAVE_SSE2
+# include <emmintrin.h>
+#endif
+
+/******************************************************************************/
+/*
+ * For unit tests and analytics tests, expose all public and private interfaces.
+ */
+#if defined(JEMALLOC_UNIT_TEST) || defined (JEMALLOC_ANALYZE_TEST)
+# define JEMALLOC_JET
+# define JEMALLOC_MANGLE
+# include "jemalloc/internal/jemalloc_preamble.h"
+# include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/******************************************************************************/
+/*
+ * For integration tests, expose the public jemalloc interfaces, but only
+ * expose the minimum necessary internal utility code (to avoid re-implementing
+ * essentially identical code within the test infrastructure).
+ */
+#elif defined(JEMALLOC_INTEGRATION_TEST) || \
+ defined(JEMALLOC_INTEGRATION_CPP_TEST)
+# define JEMALLOC_MANGLE
+# include "jemalloc/jemalloc@install_suffix@.h"
+# include "jemalloc/internal/jemalloc_internal_defs.h"
+# include "jemalloc/internal/jemalloc_internal_macros.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "jemalloc/internal/private_namespace.h"
+# include "jemalloc/internal/test_hooks.h"
+
+/* Hermetic headers. */
+# include "jemalloc/internal/assert.h"
+# include "jemalloc/internal/malloc_io.h"
+# include "jemalloc/internal/nstime.h"
+# include "jemalloc/internal/util.h"
+
+/* Non-hermetic headers. */
+# include "jemalloc/internal/qr.h"
+# include "jemalloc/internal/ql.h"
+
+/******************************************************************************/
+/*
+ * For stress tests, expose the public jemalloc interfaces with name mangling
+ * so that they can be tested as e.g. malloc() and free(). Also expose the
+ * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
+ * a separate allocator for their internal data structures.
+ */
+#elif defined(JEMALLOC_STRESS_TEST)
+# include "jemalloc/jemalloc@install_suffix@.h"
+
+# include "jemalloc/jemalloc_protos_jet.h"
+
+# define JEMALLOC_JET
+# include "jemalloc/internal/jemalloc_preamble.h"
+# include "jemalloc/internal/jemalloc_internal_includes.h"
+# include "jemalloc/internal/public_unnamespace.h"
+# undef JEMALLOC_JET
+
+# include "jemalloc/jemalloc_rename.h"
+# define JEMALLOC_MANGLE
+# ifdef JEMALLOC_STRESS_TESTLIB
+# include "jemalloc/jemalloc_mangle_jet.h"
+# else
+# include "jemalloc/jemalloc_mangle.h"
+# endif
+
+/******************************************************************************/
+/*
+ * This header does dangerous things, the effects of which only test code
+ * should be subject to.
+ */
+#else
+# error "This header cannot be included outside a testing context"
+#endif
+
+/******************************************************************************/
+/*
+ * Common test utilities.
+ */
+#include "test/btalloc.h"
+#include "test/math.h"
+#include "test/mtx.h"
+#include "test/mq.h"
+#include "test/sleep.h"
+#include "test/test.h"
+#include "test/timer.h"
+#include "test/thd.h"
+#include "test/bgthd.h"
+#define MEXP 19937
+#include "test/SFMT.h"
+
+#ifndef JEMALLOC_HAVE_MALLOC_SIZE
+#define TEST_MALLOC_SIZE malloc_usable_size
+#else
+#define TEST_MALLOC_SIZE malloc_size
+#endif
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#undef expect_not_implemented
+
+#define assert(e) do { \
+ if (!(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define not_implemented() do { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define expect_not_implemented(e) do { \
+ if (!(e)) { \
+ not_implemented(); \
+ } \
+} while (0)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
new file mode 100644
index 0000000..5cc8532
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
@@ -0,0 +1,9 @@
+#include "jemalloc/internal/jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+/*
+ * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its
+ * dependencies are notoriously unportable in practice.
+ */
+#undef HAVE_SSE2
+#undef HAVE_ALTIVEC
diff --git a/deps/jemalloc/test/include/test/math.h b/deps/jemalloc/test/include/test/math.h
new file mode 100644
index 0000000..efba086
--- /dev/null
+++ b/deps/jemalloc/test/include/test/math.h
@@ -0,0 +1,306 @@
+/*
+ * Compute the natural log of Gamma(x), accurate to 10 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
+ * [S14]. Communications of the ACM 9(9):684.
+ */
+static inline double
+ln_gamma(double x) {
+ double f, z;
+
+ assert(x > 0.0);
+
+ if (x < 7.0) {
+ f = 1.0;
+ z = x;
+ while (z < 7.0) {
+ f *= z;
+ z += 1.0;
+ }
+ x = z;
+ f = -log(f);
+ } else {
+ f = 0.0;
+ }
+
+ z = 1.0 / (x * x);
+
+ return f + (x-0.5) * log(x) - x + 0.918938533204673 +
+ (((-0.000595238095238 * z + 0.000793650793651) * z -
+ 0.002777777777778) * z + 0.083333333333333) / x;
+}
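+
+/*
+ * Quick sanity values: since Gamma(n) = (n-1)!, ln_gamma(1.0) == 0.0 and
+ * ln_gamma(5.0) == log(24.0), i.e. approximately 3.178.
+ */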
+
+/*
+ * Compute the incomplete Gamma ratio for [0..x], where p is the shape
+ * parameter, and ln_gamma_p is ln_gamma(p).
+ *
+ * This implementation is based on:
+ *
+ * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
+ * Applied Statistics 19:285-287.
+ */
+static inline double
+i_gamma(double x, double p, double ln_gamma_p) {
+ double acu, factor, oflo, gin, term, rn, a, b, an, dif;
+ double pn[6];
+ unsigned i;
+
+ assert(p > 0.0);
+ assert(x >= 0.0);
+
+ if (x == 0.0) {
+ return 0.0;
+ }
+
+ acu = 1.0e-10;
+ oflo = 1.0e30;
+ gin = 0.0;
+ factor = exp(p * log(x) - x - ln_gamma_p);
+
+ if (x <= 1.0 || x < p) {
+ /* Calculation by series expansion. */
+ gin = 1.0;
+ term = 1.0;
+ rn = p;
+
+ while (true) {
+ rn += 1.0;
+ term *= x / rn;
+ gin += term;
+ if (term <= acu) {
+ gin *= factor / p;
+ return gin;
+ }
+ }
+ } else {
+ /* Calculation by continued fraction. */
+ a = 1.0 - p;
+ b = a + x + 1.0;
+ term = 0.0;
+ pn[0] = 1.0;
+ pn[1] = x;
+ pn[2] = x + 1.0;
+ pn[3] = x * b;
+ gin = pn[2] / pn[3];
+
+ while (true) {
+ a += 1.0;
+ b += 2.0;
+ term += 1.0;
+ an = a * term;
+ for (i = 0; i < 2; i++) {
+ pn[i+4] = b * pn[i+2] - an * pn[i];
+ }
+ if (pn[5] != 0.0) {
+ rn = pn[4] / pn[5];
+ dif = fabs(gin - rn);
+ if (dif <= acu && dif <= acu * rn) {
+ gin = 1.0 - factor * gin;
+ return gin;
+ }
+ gin = rn;
+ }
+ for (i = 0; i < 4; i++) {
+ pn[i] = pn[i+2];
+ }
+
+ if (fabs(pn[4]) >= oflo) {
+ for (i = 0; i < 4; i++) {
+ pn[i] /= oflo;
+ }
+ }
+ }
+ }
+}
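+
+/*
+ * Quick sanity value: with shape p == 1.0 the ratio reduces to the
+ * exponential CDF, so i_gamma(1.0, 1.0, ln_gamma(1.0)) == 1 - exp(-1),
+ * i.e. approximately 0.632.
+ */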
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the normal distribution,
+ * compute the limit on the definite integral from [-inf..z] that satisfies p,
+ * accurate to 16 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
+ * distribution. Applied Statistics 37(3):477-484.
+ */
+static inline double
+pt_norm(double p) {
+ double q, r, ret;
+
+ assert(p > 0.0 && p < 1.0);
+
+ q = p - 0.5;
+ if (fabs(q) <= 0.425) {
+ /* p close to 1/2. */
+ r = 0.180625 - q * q;
+ return q * (((((((2.5090809287301226727e3 * r +
+ 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
+ r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
+ * r + 3.3871328727963666080e0) /
+ (((((((5.2264952788528545610e3 * r +
+ 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
+ r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
+ * r + 1.0);
+ } else {
+ if (q < 0.0) {
+ r = p;
+ } else {
+ r = 1.0 - p;
+ }
+ assert(r > 0.0);
+
+ r = sqrt(-log(r));
+ if (r <= 5.0) {
+ /* p neither close to 1/2 nor 0 or 1. */
+ r -= 1.6;
+ ret = ((((((((7.74545014278341407640e-4 * r +
+ 2.27238449892691845833e-2) * r +
+ 2.41780725177450611770e-1) * r +
+ 1.27045825245236838258e0) * r +
+ 3.64784832476320460504e0) * r +
+ 5.76949722146069140550e0) * r +
+ 4.63033784615654529590e0) * r +
+ 1.42343711074968357734e0) /
+ (((((((1.05075007164441684324e-9 * r +
+ 5.47593808499534494600e-4) * r +
+ 1.51986665636164571966e-2)
+ * r + 1.48103976427480074590e-1) * r +
+ 6.89767334985100004550e-1) * r +
+ 1.67638483018380384940e0) * r +
+ 2.05319162663775882187e0) * r + 1.0));
+ } else {
+ /* p near 0 or 1. */
+ r -= 5.0;
+ ret = ((((((((2.01033439929228813265e-7 * r +
+ 2.71155556874348757815e-5) * r +
+ 1.24266094738807843860e-3) * r +
+ 2.65321895265761230930e-2) * r +
+ 2.96560571828504891230e-1) * r +
+ 1.78482653991729133580e0) * r +
+ 5.46378491116411436990e0) * r +
+ 6.65790464350110377720e0) /
+ (((((((2.04426310338993978564e-15 * r +
+ 1.42151175831644588870e-7) * r +
+ 1.84631831751005468180e-5) * r +
+ 7.86869131145613259100e-4) * r +
+ 1.48753612908506148525e-2) * r +
+ 1.36929880922735805310e-1) * r +
+ 5.99832206555887937690e-1)
+ * r + 1.0));
+ }
+ if (q < 0.0) {
+ ret = -ret;
+ }
+ return ret;
+ }
+}
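+
+/*
+ * Quick sanity values: pt_norm() inverts the standard normal CDF, so
+ * pt_norm(0.5) == 0.0 and pt_norm(0.975) is approximately 1.960 (the
+ * familiar two-sided 95% critical value).
+ */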
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
+ * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
+ * the upper limit on the definite integral from [0..z] that satisfies p,
+ * accurate to 12 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
+ * the Chi^2 distribution. Applied Statistics 24(3):385-388.
+ *
+ * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
+ * points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
+ */
+static inline double
+pt_chi2(double p, double df, double ln_gamma_df_2) {
+ double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
+ unsigned i;
+
+ assert(p >= 0.0 && p < 1.0);
+ assert(df > 0.0);
+
+ e = 5.0e-7;
+ aa = 0.6931471805;
+
+ xx = 0.5 * df;
+ c = xx - 1.0;
+
+ if (df < -1.24 * log(p)) {
+ /* Starting approximation for small Chi^2. */
+ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
+ if (ch - e < 0.0) {
+ return ch;
+ }
+ } else {
+ if (df > 0.32) {
+ x = pt_norm(p);
+ /*
+ * Starting approximation using Wilson and Hilferty
+ * estimate.
+ */
+ p1 = 0.222222 / df;
+ ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
+ /* Starting approximation for p tending to 1. */
+ if (ch > 2.2 * df + 6.0) {
+ ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
+ ln_gamma_df_2);
+ }
+ } else {
+ ch = 0.4;
+ a = log(1.0 - p);
+ while (true) {
+ q = ch;
+ p1 = 1.0 + ch * (4.67 + ch);
+ p2 = ch * (6.73 + ch * (6.66 + ch));
+ t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
+ * (13.32 + 3.0 * ch)) / p2;
+ ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
+ c * aa) * p2 / p1) / t;
+ if (fabs(q / ch - 1.0) - 0.01 <= 0.0) {
+ break;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ /* Calculation of seven-term Taylor series. */
+ q = ch;
+ p1 = 0.5 * ch;
+ if (p1 < 0.0) {
+ return -1.0;
+ }
+ p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
+ t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
+ b = t / ch;
+ a = 0.5 * t - b * c;
+ s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
+ 60.0 * a))))) / 420.0;
+ s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
+ a)))) / 2520.0;
+ s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
+ s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
+ (889.0 + 1740.0 * a))) / 5040.0;
+ s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
+ s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
+ ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
+ - b * (s4 - b * (s5 - b * s6))))));
+ if (fabs(q / ch - 1.0) <= e) {
+ break;
+ }
+ }
+
+ return ch;
+}
+
+/*
+ * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
+ * compute the upper limit on the definite integral from [0..z] that satisfies
+ * p.
+ */
+static inline double
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
+ return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale;
+}
diff --git a/deps/jemalloc/test/include/test/mq.h b/deps/jemalloc/test/include/test/mq.h
new file mode 100644
index 0000000..5dc6486
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mq.h
@@ -0,0 +1,107 @@
+#include "test/sleep.h"
+
+/*
+ * Simple templated message queue implementation that relies on only mutexes for
+ * synchronization (which reduces portability issues). Given the following
+ * setup:
+ *
+ * typedef struct mq_msg_s mq_msg_t;
+ * struct mq_msg_s {
+ * mq_msg(mq_msg_t) link;
+ * [message data]
+ * };
+ * mq_gen(, mq_, mq_t, mq_msg_t, link)
+ *
+ * The API is as follows:
+ *
+ * bool mq_init(mq_t *mq);
+ * void mq_fini(mq_t *mq);
+ * unsigned mq_count(mq_t *mq);
+ * mq_msg_t *mq_tryget(mq_t *mq);
+ * mq_msg_t *mq_get(mq_t *mq);
+ * void mq_put(mq_t *mq, mq_msg_t *msg);
+ *
+ * The message queue linkage embedded in each message is to be treated as
+ * externally opaque (no need to initialize or clean up externally). mq_fini()
+ * does not perform any cleanup of messages, since it knows nothing of their
+ * payloads.
+ */
+#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
+
+#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
+typedef struct { \
+ mtx_t lock; \
+ ql_head(a_mq_msg_type) msgs; \
+ unsigned count; \
+} a_mq_type; \
+a_attr bool \
+a_prefix##init(a_mq_type *mq) { \
+ \
+ if (mtx_init(&mq->lock)) { \
+ return true; \
+ } \
+ ql_new(&mq->msgs); \
+ mq->count = 0; \
+ return false; \
+} \
+a_attr void \
+a_prefix##fini(a_mq_type *mq) { \
+ mtx_fini(&mq->lock); \
+} \
+a_attr unsigned \
+a_prefix##count(a_mq_type *mq) { \
+ unsigned count; \
+ \
+ mtx_lock(&mq->lock); \
+ count = mq->count; \
+ mtx_unlock(&mq->lock); \
+ return count; \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##tryget(a_mq_type *mq) { \
+ a_mq_msg_type *msg; \
+ \
+ mtx_lock(&mq->lock); \
+ msg = ql_first(&mq->msgs); \
+ if (msg != NULL) { \
+ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
+ mq->count--; \
+ } \
+ mtx_unlock(&mq->lock); \
+ return msg; \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##get(a_mq_type *mq) { \
+ a_mq_msg_type *msg; \
+ unsigned ns; \
+ \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) { \
+ return msg; \
+ } \
+ \
+ ns = 1; \
+ while (true) { \
+ sleep_ns(ns); \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) { \
+ return msg; \
+ } \
+ if (ns < 1000*1000*1000) { \
+ /* Double sleep time, up to max 1 second. */ \
+ ns <<= 1; \
+ if (ns > 1000*1000*1000) { \
+ ns = 1000*1000*1000; \
+ } \
+ } \
+ } \
+} \
+a_attr void \
+a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \
+ \
+ mtx_lock(&mq->lock); \
+ ql_elm_new(msg, a_field); \
+ ql_tail_insert(&mq->msgs, msg, a_field); \
+ mq->count++; \
+ mtx_unlock(&mq->lock); \
+}
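A usage sketch of the generator above, following the setup described in the header comment. The payload field and mq_example() are illustrative; mtx, ql, and sleep_ns() are assumed to come in via the test headers.

    typedef struct mq_msg_s mq_msg_t;
    struct mq_msg_s {
        mq_msg(mq_msg_t) link;
        int payload;    /* illustrative message data */
    };
    mq_gen(static, mq_, mq_t, mq_msg_t, link)

    static void
    mq_example(void) {
        mq_t mq;
        mq_msg_t msg;
        mq_msg_t *m;

        if (mq_init(&mq)) {
            return;    /* Mutex initialization failed. */
        }
        msg.payload = 42;
        mq_put(&mq, &msg);    /* Linkage is initialized internally. */
        m = mq_get(&mq);      /* Blocks, with backoff, until a message arrives. */
        /* m == &msg here; mq_tryget() would instead return NULL when empty. */
        (void)m;
        mq_fini(&mq);         /* Leaves message payloads untouched. */
    }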
diff --git a/deps/jemalloc/test/include/test/mtx.h b/deps/jemalloc/test/include/test/mtx.h
new file mode 100644
index 0000000..066a213
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mtx.h
@@ -0,0 +1,21 @@
+/*
+ * mtx is a slightly simplified version of malloc_mutex. This code duplication
+ * is unfortunate, but there are allocator bootstrapping considerations that
+ * would leak into the test infrastructure if malloc_mutex were used directly
+ * in tests.
+ */
+
+typedef struct {
+#ifdef _WIN32
+ CRITICAL_SECTION lock;
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock lock;
+#else
+ pthread_mutex_t lock;
+#endif
+} mtx_t;
+
+bool mtx_init(mtx_t *mtx);
+void mtx_fini(mtx_t *mtx);
+void mtx_lock(mtx_t *mtx);
+void mtx_unlock(mtx_t *mtx);
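A short sketch of the intended locking discipline; mtx_init() returns true on failure, mirroring malloc_mutex. The counter and function names are illustrative.

    static mtx_t lock;
    static unsigned shared_count;

    static bool
    counter_setup(void) {
        return mtx_init(&lock);    /* true on failure */
    }

    static void
    counter_incr(void) {
        mtx_lock(&lock);
        shared_count++;
        mtx_unlock(&lock);
    }

    static void
    counter_teardown(void) {
        mtx_fini(&lock);
    }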
diff --git a/deps/jemalloc/test/include/test/nbits.h b/deps/jemalloc/test/include/test/nbits.h
new file mode 100644
index 0000000..c06cf1b
--- /dev/null
+++ b/deps/jemalloc/test/include/test/nbits.h
@@ -0,0 +1,111 @@
+#ifndef TEST_NBITS_H
+#define TEST_NBITS_H
+
+/* Interesting bitmap counts to test. */
+
+#define NBITS_TAB \
+ NB( 1) \
+ NB( 2) \
+ NB( 3) \
+ NB( 4) \
+ NB( 5) \
+ NB( 6) \
+ NB( 7) \
+ NB( 8) \
+ NB( 9) \
+ NB(10) \
+ NB(11) \
+ NB(12) \
+ NB(13) \
+ NB(14) \
+ NB(15) \
+ NB(16) \
+ NB(17) \
+ NB(18) \
+ NB(19) \
+ NB(20) \
+ NB(21) \
+ NB(22) \
+ NB(23) \
+ NB(24) \
+ NB(25) \
+ NB(26) \
+ NB(27) \
+ NB(28) \
+ NB(29) \
+ NB(30) \
+ NB(31) \
+ NB(32) \
+ \
+ NB(33) \
+ NB(34) \
+ NB(35) \
+ NB(36) \
+ NB(37) \
+ NB(38) \
+ NB(39) \
+ NB(40) \
+ NB(41) \
+ NB(42) \
+ NB(43) \
+ NB(44) \
+ NB(45) \
+ NB(46) \
+ NB(47) \
+ NB(48) \
+ NB(49) \
+ NB(50) \
+ NB(51) \
+ NB(52) \
+ NB(53) \
+ NB(54) \
+ NB(55) \
+ NB(56) \
+ NB(57) \
+ NB(58) \
+ NB(59) \
+ NB(60) \
+ NB(61) \
+ NB(62) \
+ NB(63) \
+ NB(64) \
+ NB(65) \
+ NB(66) \
+ NB(67) \
+ \
+ NB(126) \
+ NB(127) \
+ NB(128) \
+ NB(129) \
+ NB(130) \
+ \
+ NB(254) \
+ NB(255) \
+ NB(256) \
+ NB(257) \
+ NB(258) \
+ \
+ NB(510) \
+ NB(511) \
+ NB(512) \
+ NB(513) \
+ NB(514) \
+ \
+ NB(1022) \
+ NB(1023) \
+ NB(1024) \
+ NB(1025) \
+ NB(1026) \
+ \
+ NB(2048) \
+ \
+ NB(4094) \
+ NB(4095) \
+ NB(4096) \
+ NB(4097) \
+ NB(4098) \
+ \
+ NB(8192) \
+ NB(16384)
+
+#endif /* TEST_NBITS_H */
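NBITS_TAB is an X-macro table: a consumer defines NB() for its purpose, expands the table once, and then undefines NB(). A sketch of one possible consumer; check_one_size() is a hypothetical helper, not part of this tree.

    static void
    check_one_size(size_t nbits) {
        /* Exercise e.g. a bitmap of nbits bits here (illustrative). */
        (void)nbits;
    }

    static void
    check_all_sizes(void) {
    #define NB(nbits) check_one_size(nbits);
        NBITS_TAB
    #undef NB
    }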
diff --git a/deps/jemalloc/test/include/test/san.h b/deps/jemalloc/test/include/test/san.h
new file mode 100644
index 0000000..da07865
--- /dev/null
+++ b/deps/jemalloc/test/include/test/san.h
@@ -0,0 +1,14 @@
+#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
+# define TEST_SAN_UAF_ALIGN_ENABLE "lg_san_uaf_align:12"
+# define TEST_SAN_UAF_ALIGN_DISABLE "lg_san_uaf_align:-1"
+#else
+# define TEST_SAN_UAF_ALIGN_ENABLE ""
+# define TEST_SAN_UAF_ALIGN_DISABLE ""
+#endif
+
+static inline bool
+extent_is_guarded(tsdn_t *tsdn, void *ptr) {
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ return edata_guarded_get(edata);
+}
+
diff --git a/deps/jemalloc/test/include/test/sleep.h b/deps/jemalloc/test/include/test/sleep.h
new file mode 100644
index 0000000..c232f63
--- /dev/null
+++ b/deps/jemalloc/test/include/test/sleep.h
@@ -0,0 +1 @@
+void sleep_ns(unsigned ns);
diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h
new file mode 100644
index 0000000..d4b6591
--- /dev/null
+++ b/deps/jemalloc/test/include/test/test.h
@@ -0,0 +1,583 @@
+#define ASSERT_BUFSIZE 256
+
+#define verify_cmp(may_abort, t, a, b, cmp, neg_cmp, pri, ...) do { \
+ const t a_ = (a); \
+ const t b_ = (b); \
+ if (!(a_ cmp b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) " #cmp " (%s) --> " \
+ "%" pri " " #neg_cmp " %" pri ": ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_, b_); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+ } \
+} while (0)
+
+#define expect_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(false, \
+ t, a, b, cmp, neg_cmp, pri, __VA_ARGS__)
+
+#define expect_ptr_eq(a, b, ...) expect_cmp(void *, a, b, ==, \
+ !=, "p", __VA_ARGS__)
+#define expect_ptr_ne(a, b, ...) expect_cmp(void *, a, b, !=, \
+ ==, "p", __VA_ARGS__)
+#define expect_ptr_null(a, ...) expect_cmp(void *, a, NULL, ==, \
+ !=, "p", __VA_ARGS__)
+#define expect_ptr_not_null(a, ...) expect_cmp(void *, a, NULL, !=, \
+ ==, "p", __VA_ARGS__)
+
+#define expect_c_eq(a, b, ...) expect_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
+#define expect_c_ne(a, b, ...) expect_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
+#define expect_c_lt(a, b, ...) expect_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
+#define expect_c_le(a, b, ...) expect_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
+#define expect_c_ge(a, b, ...) expect_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
+#define expect_c_gt(a, b, ...) expect_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
+
+#define expect_x_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
+#define expect_x_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
+#define expect_x_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
+#define expect_x_le(a, b, ...) expect_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
+#define expect_x_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
+#define expect_x_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
+
+#define expect_d_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
+#define expect_d_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
+#define expect_d_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
+#define expect_d_le(a, b, ...) expect_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
+#define expect_d_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
+#define expect_d_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
+
+#define expect_u_eq(a, b, ...) expect_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
+#define expect_u_ne(a, b, ...) expect_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
+#define expect_u_lt(a, b, ...) expect_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
+#define expect_u_le(a, b, ...) expect_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
+#define expect_u_ge(a, b, ...) expect_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
+#define expect_u_gt(a, b, ...) expect_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
+
+#define expect_ld_eq(a, b, ...) expect_cmp(long, a, b, ==, \
+ !=, "ld", __VA_ARGS__)
+#define expect_ld_ne(a, b, ...) expect_cmp(long, a, b, !=, \
+ ==, "ld", __VA_ARGS__)
+#define expect_ld_lt(a, b, ...) expect_cmp(long, a, b, <, \
+ >=, "ld", __VA_ARGS__)
+#define expect_ld_le(a, b, ...) expect_cmp(long, a, b, <=, \
+ >, "ld", __VA_ARGS__)
+#define expect_ld_ge(a, b, ...) expect_cmp(long, a, b, >=, \
+ <, "ld", __VA_ARGS__)
+#define expect_ld_gt(a, b, ...) expect_cmp(long, a, b, >, \
+ <=, "ld", __VA_ARGS__)
+
+#define expect_lu_eq(a, b, ...) expect_cmp(unsigned long, \
+ a, b, ==, !=, "lu", __VA_ARGS__)
+#define expect_lu_ne(a, b, ...) expect_cmp(unsigned long, \
+ a, b, !=, ==, "lu", __VA_ARGS__)
+#define expect_lu_lt(a, b, ...) expect_cmp(unsigned long, \
+ a, b, <, >=, "lu", __VA_ARGS__)
+#define expect_lu_le(a, b, ...) expect_cmp(unsigned long, \
+ a, b, <=, >, "lu", __VA_ARGS__)
+#define expect_lu_ge(a, b, ...) expect_cmp(unsigned long, \
+ a, b, >=, <, "lu", __VA_ARGS__)
+#define expect_lu_gt(a, b, ...) expect_cmp(unsigned long, \
+ a, b, >, <=, "lu", __VA_ARGS__)
+
+#define expect_qd_eq(a, b, ...) expect_cmp(long long, a, b, ==, \
+ !=, "qd", __VA_ARGS__)
+#define expect_qd_ne(a, b, ...) expect_cmp(long long, a, b, !=, \
+ ==, "qd", __VA_ARGS__)
+#define expect_qd_lt(a, b, ...) expect_cmp(long long, a, b, <, \
+ >=, "qd", __VA_ARGS__)
+#define expect_qd_le(a, b, ...) expect_cmp(long long, a, b, <=, \
+ >, "qd", __VA_ARGS__)
+#define expect_qd_ge(a, b, ...) expect_cmp(long long, a, b, >=, \
+ <, "qd", __VA_ARGS__)
+#define expect_qd_gt(a, b, ...) expect_cmp(long long, a, b, >, \
+ <=, "qd", __VA_ARGS__)
+
+#define expect_qu_eq(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", __VA_ARGS__)
+#define expect_qu_ne(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", __VA_ARGS__)
+#define expect_qu_lt(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, <, >=, "qu", __VA_ARGS__)
+#define expect_qu_le(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, <=, >, "qu", __VA_ARGS__)
+#define expect_qu_ge(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, >=, <, "qu", __VA_ARGS__)
+#define expect_qu_gt(a, b, ...) expect_cmp(unsigned long long, \
+ a, b, >, <=, "qu", __VA_ARGS__)
+
+#define expect_jd_eq(a, b, ...) expect_cmp(intmax_t, a, b, ==, \
+ !=, "jd", __VA_ARGS__)
+#define expect_jd_ne(a, b, ...) expect_cmp(intmax_t, a, b, !=, \
+ ==, "jd", __VA_ARGS__)
+#define expect_jd_lt(a, b, ...) expect_cmp(intmax_t, a, b, <, \
+ >=, "jd", __VA_ARGS__)
+#define expect_jd_le(a, b, ...) expect_cmp(intmax_t, a, b, <=, \
+ >, "jd", __VA_ARGS__)
+#define expect_jd_ge(a, b, ...) expect_cmp(intmax_t, a, b, >=, \
+ <, "jd", __VA_ARGS__)
+#define expect_jd_gt(a, b, ...) expect_cmp(intmax_t, a, b, >, \
+ <=, "jd", __VA_ARGS__)
+
+#define expect_ju_eq(a, b, ...) expect_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", __VA_ARGS__)
+#define expect_ju_ne(a, b, ...) expect_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", __VA_ARGS__)
+#define expect_ju_lt(a, b, ...) expect_cmp(uintmax_t, a, b, <, \
+ >=, "ju", __VA_ARGS__)
+#define expect_ju_le(a, b, ...) expect_cmp(uintmax_t, a, b, <=, \
+ >, "ju", __VA_ARGS__)
+#define expect_ju_ge(a, b, ...) expect_cmp(uintmax_t, a, b, >=, \
+ <, "ju", __VA_ARGS__)
+#define expect_ju_gt(a, b, ...) expect_cmp(uintmax_t, a, b, >, \
+ <=, "ju", __VA_ARGS__)
+
+#define expect_zd_eq(a, b, ...) expect_cmp(ssize_t, a, b, ==, \
+ !=, "zd", __VA_ARGS__)
+#define expect_zd_ne(a, b, ...) expect_cmp(ssize_t, a, b, !=, \
+ ==, "zd", __VA_ARGS__)
+#define expect_zd_lt(a, b, ...) expect_cmp(ssize_t, a, b, <, \
+ >=, "zd", __VA_ARGS__)
+#define expect_zd_le(a, b, ...) expect_cmp(ssize_t, a, b, <=, \
+ >, "zd", __VA_ARGS__)
+#define expect_zd_ge(a, b, ...) expect_cmp(ssize_t, a, b, >=, \
+ <, "zd", __VA_ARGS__)
+#define expect_zd_gt(a, b, ...) expect_cmp(ssize_t, a, b, >, \
+ <=, "zd", __VA_ARGS__)
+
+#define expect_zu_eq(a, b, ...) expect_cmp(size_t, a, b, ==, \
+ !=, "zu", __VA_ARGS__)
+#define expect_zu_ne(a, b, ...) expect_cmp(size_t, a, b, !=, \
+ ==, "zu", __VA_ARGS__)
+#define expect_zu_lt(a, b, ...) expect_cmp(size_t, a, b, <, \
+ >=, "zu", __VA_ARGS__)
+#define expect_zu_le(a, b, ...) expect_cmp(size_t, a, b, <=, \
+ >, "zu", __VA_ARGS__)
+#define expect_zu_ge(a, b, ...) expect_cmp(size_t, a, b, >=, \
+ <, "zu", __VA_ARGS__)
+#define expect_zu_gt(a, b, ...) expect_cmp(size_t, a, b, >, \
+ <=, "zu", __VA_ARGS__)
+
+#define expect_d32_eq(a, b, ...) expect_cmp(int32_t, a, b, ==, \
+ !=, FMTd32, __VA_ARGS__)
+#define expect_d32_ne(a, b, ...) expect_cmp(int32_t, a, b, !=, \
+ ==, FMTd32, __VA_ARGS__)
+#define expect_d32_lt(a, b, ...) expect_cmp(int32_t, a, b, <, \
+ >=, FMTd32, __VA_ARGS__)
+#define expect_d32_le(a, b, ...) expect_cmp(int32_t, a, b, <=, \
+ >, FMTd32, __VA_ARGS__)
+#define expect_d32_ge(a, b, ...) expect_cmp(int32_t, a, b, >=, \
+ <, FMTd32, __VA_ARGS__)
+#define expect_d32_gt(a, b, ...) expect_cmp(int32_t, a, b, >, \
+ <=, FMTd32, __VA_ARGS__)
+
+#define expect_u32_eq(a, b, ...) expect_cmp(uint32_t, a, b, ==, \
+ !=, FMTu32, __VA_ARGS__)
+#define expect_u32_ne(a, b, ...) expect_cmp(uint32_t, a, b, !=, \
+ ==, FMTu32, __VA_ARGS__)
+#define expect_u32_lt(a, b, ...) expect_cmp(uint32_t, a, b, <, \
+ >=, FMTu32, __VA_ARGS__)
+#define expect_u32_le(a, b, ...) expect_cmp(uint32_t, a, b, <=, \
+ >, FMTu32, __VA_ARGS__)
+#define expect_u32_ge(a, b, ...) expect_cmp(uint32_t, a, b, >=, \
+ <, FMTu32, __VA_ARGS__)
+#define expect_u32_gt(a, b, ...) expect_cmp(uint32_t, a, b, >, \
+ <=, FMTu32, __VA_ARGS__)
+
+#define expect_d64_eq(a, b, ...) expect_cmp(int64_t, a, b, ==, \
+ !=, FMTd64, __VA_ARGS__)
+#define expect_d64_ne(a, b, ...) expect_cmp(int64_t, a, b, !=, \
+ ==, FMTd64, __VA_ARGS__)
+#define expect_d64_lt(a, b, ...) expect_cmp(int64_t, a, b, <, \
+ >=, FMTd64, __VA_ARGS__)
+#define expect_d64_le(a, b, ...) expect_cmp(int64_t, a, b, <=, \
+ >, FMTd64, __VA_ARGS__)
+#define expect_d64_ge(a, b, ...) expect_cmp(int64_t, a, b, >=, \
+ <, FMTd64, __VA_ARGS__)
+#define expect_d64_gt(a, b, ...) expect_cmp(int64_t, a, b, >, \
+ <=, FMTd64, __VA_ARGS__)
+
+#define expect_u64_eq(a, b, ...) expect_cmp(uint64_t, a, b, ==, \
+ !=, FMTu64, __VA_ARGS__)
+#define expect_u64_ne(a, b, ...) expect_cmp(uint64_t, a, b, !=, \
+ ==, FMTu64, __VA_ARGS__)
+#define expect_u64_lt(a, b, ...) expect_cmp(uint64_t, a, b, <, \
+ >=, FMTu64, __VA_ARGS__)
+#define expect_u64_le(a, b, ...) expect_cmp(uint64_t, a, b, <=, \
+ >, FMTu64, __VA_ARGS__)
+#define expect_u64_ge(a, b, ...) expect_cmp(uint64_t, a, b, >=, \
+ <, FMTu64, __VA_ARGS__)
+#define expect_u64_gt(a, b, ...) expect_cmp(uint64_t, a, b, >, \
+ <=, FMTu64, __VA_ARGS__)
+
+#define verify_b_eq(may_abort, a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ == b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) == (%s) --> %s != %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+ } \
+} while (0)
+
+#define verify_b_ne(may_abort, a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ != b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) != (%s) --> %s == %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+ } \
+} while (0)
+
+#define expect_b_eq(a, b, ...) verify_b_eq(false, a, b, __VA_ARGS__)
+#define expect_b_ne(a, b, ...) verify_b_ne(false, a, b, __VA_ARGS__)
+
+#define expect_true(a, ...) expect_b_eq(a, true, __VA_ARGS__)
+#define expect_false(a, ...) expect_b_eq(a, false, __VA_ARGS__)
+
+#define verify_str_eq(may_abort, a, b, ...) do { \
+ if (strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) same as (%s) --> " \
+ "\"%s\" differs from \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+ } \
+} while (0)
+
+#define verify_str_ne(may_abort, a, b, ...) do { \
+ if (!strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) differs from (%s) --> " \
+ "\"%s\" same as \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+ } \
+} while (0)
+
+#define expect_str_eq(a, b, ...) verify_str_eq(false, a, b, __VA_ARGS__)
+#define expect_str_ne(a, b, ...) verify_str_ne(false, a, b, __VA_ARGS__)
+
+#define verify_not_reached(may_abort, ...) do { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Unreachable code reached: ", \
+ __func__, __FILE__, __LINE__); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ if (may_abort) { \
+ abort(); \
+ } else { \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define expect_not_reached(...) verify_not_reached(false, __VA_ARGS__)
+
+#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(true, \
+ t, a, b, cmp, neg_cmp, pri, __VA_ARGS__)
+
+#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
+ ==, "p", __VA_ARGS__)
+#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
+ ==, "p", __VA_ARGS__)
+
+#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
+#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
+#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
+#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
+#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
+#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
+
+#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
+#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
+#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
+#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
+#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
+#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
+
+#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
+#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
+#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
+#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
+#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
+#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
+
+#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
+#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
+#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
+#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
+#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
+#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
+
+#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
+ !=, "ld", __VA_ARGS__)
+#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
+ ==, "ld", __VA_ARGS__)
+#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
+ >=, "ld", __VA_ARGS__)
+#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
+ >, "ld", __VA_ARGS__)
+#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
+ <, "ld", __VA_ARGS__)
+#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
+ <=, "ld", __VA_ARGS__)
+
+#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
+ a, b, ==, !=, "lu", __VA_ARGS__)
+#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
+ a, b, !=, ==, "lu", __VA_ARGS__)
+#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <, >=, "lu", __VA_ARGS__)
+#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <=, >, "lu", __VA_ARGS__)
+#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >=, <, "lu", __VA_ARGS__)
+#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >, <=, "lu", __VA_ARGS__)
+
+#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
+ !=, "qd", __VA_ARGS__)
+#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
+ ==, "qd", __VA_ARGS__)
+#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
+ >=, "qd", __VA_ARGS__)
+#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
+ >, "qd", __VA_ARGS__)
+#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
+ <, "qd", __VA_ARGS__)
+#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
+ <=, "qd", __VA_ARGS__)
+
+#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", __VA_ARGS__)
+#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", __VA_ARGS__)
+#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <, >=, "qu", __VA_ARGS__)
+#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <=, >, "qu", __VA_ARGS__)
+#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >=, <, "qu", __VA_ARGS__)
+#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >, <=, "qu", __VA_ARGS__)
+
+#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
+ !=, "jd", __VA_ARGS__)
+#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
+ ==, "jd", __VA_ARGS__)
+#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
+ >=, "jd", __VA_ARGS__)
+#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
+ >, "jd", __VA_ARGS__)
+#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
+ <, "jd", __VA_ARGS__)
+#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
+ <=, "jd", __VA_ARGS__)
+
+#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", __VA_ARGS__)
+#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", __VA_ARGS__)
+#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
+ >=, "ju", __VA_ARGS__)
+#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
+ >, "ju", __VA_ARGS__)
+#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
+ <, "ju", __VA_ARGS__)
+#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
+ <=, "ju", __VA_ARGS__)
+
+#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
+ !=, "zd", __VA_ARGS__)
+#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
+ ==, "zd", __VA_ARGS__)
+#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
+ >=, "zd", __VA_ARGS__)
+#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
+ >, "zd", __VA_ARGS__)
+#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
+ <, "zd", __VA_ARGS__)
+#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
+ <=, "zd", __VA_ARGS__)
+
+#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
+ !=, "zu", __VA_ARGS__)
+#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
+ ==, "zu", __VA_ARGS__)
+#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
+ >=, "zu", __VA_ARGS__)
+#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
+ >, "zu", __VA_ARGS__)
+#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
+ <, "zu", __VA_ARGS__)
+#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
+ <=, "zu", __VA_ARGS__)
+
+#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
+ !=, FMTd32, __VA_ARGS__)
+#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
+ ==, FMTd32, __VA_ARGS__)
+#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
+ >=, FMTd32, __VA_ARGS__)
+#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
+ >, FMTd32, __VA_ARGS__)
+#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
+ <, FMTd32, __VA_ARGS__)
+#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
+ <=, FMTd32, __VA_ARGS__)
+
+#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
+ !=, FMTu32, __VA_ARGS__)
+#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
+ ==, FMTu32, __VA_ARGS__)
+#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
+ >=, FMTu32, __VA_ARGS__)
+#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
+ >, FMTu32, __VA_ARGS__)
+#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
+ <, FMTu32, __VA_ARGS__)
+#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
+ <=, FMTu32, __VA_ARGS__)
+
+#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
+ !=, FMTd64, __VA_ARGS__)
+#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
+ ==, FMTd64, __VA_ARGS__)
+#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
+ >=, FMTd64, __VA_ARGS__)
+#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
+ >, FMTd64, __VA_ARGS__)
+#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
+ <, FMTd64, __VA_ARGS__)
+#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
+ <=, FMTd64, __VA_ARGS__)
+
+#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
+ !=, FMTu64, __VA_ARGS__)
+#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
+ ==, FMTu64, __VA_ARGS__)
+#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
+ >=, FMTu64, __VA_ARGS__)
+#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
+ >, FMTu64, __VA_ARGS__)
+#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
+ <, FMTu64, __VA_ARGS__)
+#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
+ <=, FMTu64, __VA_ARGS__)
+
+#define assert_b_eq(a, b, ...) verify_b_eq(true, a, b, __VA_ARGS__)
+#define assert_b_ne(a, b, ...) verify_b_ne(true, a, b, __VA_ARGS__)
+
+#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
+#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
+
+#define assert_str_eq(a, b, ...) verify_str_eq(true, a, b, __VA_ARGS__)
+#define assert_str_ne(a, b, ...) verify_str_ne(true, a, b, __VA_ARGS__)
+
+#define assert_not_reached(...) verify_not_reached(true, __VA_ARGS__)
+
+/*
+ * If this enum changes, corresponding changes in test/test.sh.in are also
+ * necessary.
+ */
+typedef enum {
+ test_status_pass = 0,
+ test_status_skip = 1,
+ test_status_fail = 2,
+
+ test_status_count = 3
+} test_status_t;
+
+typedef void (test_t)(void);
+
+#define TEST_BEGIN(f) \
+static void \
+f(void) { \
+ p_test_init(#f);
+
+#define TEST_END \
+ goto label_test_end; \
+label_test_end: \
+ p_test_fini(); \
+}
+
+#define test(...) \
+ p_test(__VA_ARGS__, NULL)
+
+#define test_no_reentrancy(...) \
+ p_test_no_reentrancy(__VA_ARGS__, NULL)
+
+#define test_no_malloc_init(...) \
+ p_test_no_malloc_init(__VA_ARGS__, NULL)
+
+#define test_skip_if(e) do { \
+ if (e) { \
+ test_skip("%s:%s:%d: Test skipped: (%s)", \
+ __func__, __FILE__, __LINE__, #e); \
+ goto label_test_end; \
+ } \
+} while (0)
+
+bool test_is_reentrant(void);
+
+void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+/* For private use by macros. */
+test_status_t p_test(test_t *t, ...);
+test_status_t p_test_no_reentrancy(test_t *t, ...);
+test_status_t p_test_no_malloc_init(test_t *t, ...);
+void p_test_init(const char *name);
+void p_test_fini(void);
+void p_test_fail(const char *prefix, const char *message);
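The expect_* macros report failures through p_test_fail() and let the test continue, while the assert_* variants abort immediately; TEST_BEGIN/TEST_END and test() are the harness the integration tests below are built on. A minimal skeleton, with an illustrative test body:

    #include "test/jemalloc_test.h"

    TEST_BEGIN(test_example) {
        void *p = malloc(1);
        expect_ptr_not_null(p, "Unexpected malloc() failure");
        free(p);
    }
    TEST_END

    TEST_BEGIN(test_example_skip) {
        /* Illustrative condition; test_skip_if() jumps to label_test_end. */
        test_skip_if(sizeof(void *) < 8);
        expect_zu_ge(sizeof(void *), 8, "Unexpected pointer size");
    }
    TEST_END

    int
    main(void) {
        return test(
            test_example,
            test_example_skip);
    }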
diff --git a/deps/jemalloc/test/include/test/thd.h b/deps/jemalloc/test/include/test/thd.h
new file mode 100644
index 0000000..47a5126
--- /dev/null
+++ b/deps/jemalloc/test/include/test/thd.h
@@ -0,0 +1,9 @@
+/* Abstraction layer for threading in tests. */
+#ifdef _WIN32
+typedef HANDLE thd_t;
+#else
+typedef pthread_t thd_t;
+#endif
+
+void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
+void thd_join(thd_t thd, void **ret);
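A small sketch of the create/join pattern used by the tests below; the worker function and its argument are illustrative.

    static void *
    worker(void *arg) {
        (void)arg;
        /* Per-thread work goes here. */
        return NULL;
    }

    static void
    run_one_thread(void) {
        thd_t thd;

        thd_create(&thd, worker, NULL);
        thd_join(thd, NULL);
    }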
diff --git a/deps/jemalloc/test/include/test/timer.h b/deps/jemalloc/test/include/test/timer.h
new file mode 100644
index 0000000..ace6191
--- /dev/null
+++ b/deps/jemalloc/test/include/test/timer.h
@@ -0,0 +1,11 @@
+/* Simple timer, for use in benchmark reporting. */
+
+typedef struct {
+ nstime_t t0;
+ nstime_t t1;
+} timedelta_t;
+
+void timer_start(timedelta_t *timer);
+void timer_stop(timedelta_t *timer);
+uint64_t timer_usec(const timedelta_t *timer);
+void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
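A sketch of timing one region and comparing two measurements; malloc_printf() and FMTu64 come from the jemalloc test harness, and the region and variable names are illustrative.

    static void
    time_region(void) {
        timedelta_t timer;

        timer_start(&timer);
        /* ... code under measurement ... */
        timer_stop(&timer);
        malloc_printf("elapsed: %"FMTu64" us\n", timer_usec(&timer));
        /*
         * To compare two timedelta_t measurements a and b as a formatted
         * ratio: timer_ratio(&a, &b, buf, sizeof(buf));
         */
    }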
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 0000000..7e61df0
--- /dev/null
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,66 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Error in arenas.create");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
+ expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return NULL;
+}
+
+TEST_BEGIN(test_MALLOCX_ARENA) {
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_MALLOCX_ARENA);
+}
diff --git a/deps/jemalloc/test/integration/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c
new file mode 100644
index 0000000..b37d5ba
--- /dev/null
+++ b/deps/jemalloc/test/integration/aligned_alloc.c
@@ -0,0 +1,157 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors) {
+ size_t alignment;
+ void *p;
+
+ alignment = 0;
+ set_errno(0);
+ p = aligned_alloc(alignment, 1);
+ expect_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ set_errno(0);
+ p = aligned_alloc(alignment + 1, 1);
+ expect_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_oom_errors) {
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ ps[i] = aligned_alloc(alignment, size);
+ if (ps[i] == NULL) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += TEST_MALLOC_SIZE(ps[i]);
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+TEST_BEGIN(test_zero_alloc) {
+ void *res = aligned_alloc(8, 0);
+ assert(res);
+ size_t usable = TEST_MALLOC_SIZE(res);
+ assert(usable > 0);
+ free(res);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size,
+ test_zero_alloc);
+}
diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c
new file mode 100644
index 0000000..0c64272
--- /dev/null
+++ b/deps/jemalloc/test/integration/allocated.c
@@ -0,0 +1,124 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ expect_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
+ 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ expect_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
+ expect_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ expect_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = TEST_MALLOC_SIZE(p);
+ expect_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
+ expect_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ expect_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ expect_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return NULL;
+label_ENOENT:
+ expect_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
diff --git a/deps/jemalloc/test/integration/cpp/basic.cpp b/deps/jemalloc/test/integration/cpp/basic.cpp
new file mode 100644
index 0000000..c1cf6cd
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/basic.cpp
@@ -0,0 +1,24 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_basic) {
+ auto foo = new long(4);
+ expect_ptr_not_null(foo, "Unexpected new[] failure");
+ delete foo;
+ // Test nullptr handling.
+ foo = nullptr;
+ delete foo;
+
+ auto bar = new long;
+ expect_ptr_not_null(bar, "Unexpected new failure");
+ delete bar;
+ // Test nullptr handling.
+ bar = nullptr;
+ delete bar;
+}
+TEST_END
+
+int
+main() {
+ return test(
+ test_basic);
+}
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp b/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp
new file mode 100644
index 0000000..42196d6
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp
@@ -0,0 +1,23 @@
+#include <memory>
+
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_failing_alloc) {
+ bool saw_exception = false;
+ try {
+ /* Too big of an allocation to succeed. */
+ void *volatile ptr = ::operator new((size_t)-1);
+ (void)ptr;
+ } catch (...) {
+ saw_exception = true;
+ }
+ expect_true(saw_exception, "Didn't get a failure");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_failing_alloc);
+}
+
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_false.sh b/deps/jemalloc/test/integration/cpp/infallible_new_false.sh
new file mode 100644
index 0000000..7d41812
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_false.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+XMALLOC_STR=""
+if [ "x${enable_xmalloc}" = "x1" ] ; then
+ XMALLOC_STR="xmalloc:false,"
+fi
+
+export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:false"
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp b/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp
new file mode 100644
index 0000000..d675412
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp
@@ -0,0 +1,67 @@
+#include <stdio.h>
+
+#include "test/jemalloc_test.h"
+
+/*
+ * We can't test C++ in unit tests. In order to intercept abort, use a secret
+ * safety check abort hook in integration tests.
+ */
+typedef void (*abort_hook_t)(const char *message);
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ if (strcmp(message, "<jemalloc>: Allocation failed and "
+ "opt.experimental_infallible_new is true. Aborting.\n") != 0) {
+ abort();
+ }
+ fake_abort_called = true;
+}
+
+static bool
+own_operator_new(void) {
+ uint64_t before, after;
+ size_t sz = sizeof(before);
+
+ /* thread.allocated is always available, even w/o config_stats. */
+ expect_d_eq(mallctl("thread.allocated", (void *)&before, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure reading stats");
+ void *volatile ptr = ::operator new((size_t)8);
+ expect_ptr_not_null(ptr, "Unexpected allocation failure");
+ expect_d_eq(mallctl("thread.allocated", (void *)&after, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure reading stats");
+
+ return (after != before);
+}
+
+TEST_BEGIN(test_failing_alloc) {
+ abort_hook_t abort_hook = &fake_abort;
+ expect_d_eq(mallctl("experimental.hooks.safety_check_abort", NULL, NULL,
+ (void *)&abort_hook, sizeof(abort_hook)), 0,
+ "Unexpected mallctl failure setting abort hook");
+
+ /*
+ * Not owning operator new is only expected to happen on MinGW which
+ * does not support operator new / delete replacement.
+ */
+#ifdef _WIN32
+ test_skip_if(!own_operator_new());
+#else
+ expect_true(own_operator_new(), "No operator new overload");
+#endif
+ void *volatile ptr = (void *)1;
+ try {
+ /* Too big of an allocation to succeed. */
+ ptr = ::operator new((size_t)-1);
+ } catch (...) {
+ abort();
+ }
+ expect_ptr_null(ptr, "Allocation should have failed");
+ expect_b_eq(fake_abort_called, true, "Abort hook not invoked");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_failing_alloc);
+}
+
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_true.sh b/deps/jemalloc/test/integration/cpp/infallible_new_true.sh
new file mode 100644
index 0000000..4a0ff54
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_true.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+XMALLOC_STR=""
+if [ "x${enable_xmalloc}" = "x1" ] ; then
+ XMALLOC_STR="xmalloc:false,"
+fi
+
+export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:true"
diff --git a/deps/jemalloc/test/integration/extent.c b/deps/jemalloc/test/integration/extent.c
new file mode 100644
index 0000000..7a028f1
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.c
@@ -0,0 +1,287 @@
+#include "test/jemalloc_test.h"
+
+#include "test/extent_hooks.h"
+
+#include "jemalloc/internal/arena_types.h"
+
+static void
+test_extent_body(unsigned arena_ind) {
+ void *p;
+ size_t large0, large1, large2, sz;
+ size_t purge_mib[3];
+ size_t purge_miblen;
+ int flags;
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.0.size failure");
+ expect_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.1.size failure");
+ expect_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ purge_miblen = sizeof(purge_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ purge_mib[1] = (size_t)arena_ind;
+ called_alloc = false;
+ try_alloc = true;
+ try_dalloc = false;
+ try_decommit = false;
+ p = mallocx(large0 * 2, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ expect_true(called_alloc, "Expected alloc call");
+ called_dalloc = false;
+ called_decommit = false;
+ did_purge_lazy = false;
+ did_purge_forced = false;
+ called_split = false;
+ xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
+ expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_a) {
+ expect_true(called_dalloc, "Expected dalloc call");
+ expect_true(called_decommit, "Expected decommit call");
+ expect_true(did_purge_lazy || did_purge_forced,
+ "Expected purge");
+ expect_true(called_split, "Expected split call");
+ }
+ dallocx(p, flags);
+ try_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ try_dalloc = false;
+ try_decommit = true;
+ p = mallocx(large0 * 2, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ called_split = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
+ expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_b) {
+ expect_true(did_split, "Expected split");
+ }
+ xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
+ if (did_split) {
+ expect_b_eq(did_decommit, did_commit,
+ "Expected decommit/commit match");
+ }
+ if (xallocx_success_b && xallocx_success_c) {
+ expect_true(did_merge, "Expected merge");
+ }
+ dallocx(p, flags);
+ try_dalloc = true;
+ try_decommit = false;
+
+ /* Make sure non-large allocation succeeds. */
+ p = mallocx(42, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, flags);
+}
+
+static void
+test_manual_hook_auto_arena(void) {
+ unsigned narenas;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ /* Get number of auto arenas. */
+ expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ if (narenas == 1) {
+ return;
+ }
+
+ /* Install custom extent hooks on arena 1 (might not be initialized). */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = 1;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ static bool auto_arena_created = false;
+ if (old_hooks != &hooks) {
+ expect_b_eq(auto_arena_created, false,
+ "Expected auto arena 1 created only once.");
+ auto_arena_created = true;
+ }
+}
+
+static void
+test_manual_hook_body(void) {
+ unsigned arena_ind;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ /* Install custom extent hooks. */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = (size_t)arena_ind;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->alloc, extent_alloc_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->commit, extent_commit_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->decommit, extent_decommit_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->split, extent_split_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->merge, extent_merge_hook,
+ "Unexpected extent_hooks error");
+
+ if (!is_background_thread_enabled()) {
+ test_extent_body(arena_ind);
+ }
+
+ /* Restore extent hooks. */
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
+ (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, NULL, 0), 0, "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->alloc, default_hooks->alloc,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->commit, default_hooks->commit,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->decommit, default_hooks->decommit,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->split, default_hooks->split,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->merge, default_hooks->merge,
+ "Unexpected extent_hooks error");
+}
+
+TEST_BEGIN(test_extent_manual_hook) {
+ test_manual_hook_auto_arena();
+ test_manual_hook_body();
+
+ /* Test failure paths. */
+ try_split = false;
+ test_manual_hook_body();
+ try_merge = false;
+ test_manual_hook_body();
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ test_manual_hook_body();
+
+ try_split = try_merge = try_purge_lazy = try_purge_forced = true;
+}
+TEST_END
+
+TEST_BEGIN(test_extent_auto_hook) {
+ unsigned arena_ind;
+ size_t new_size, sz;
+ extent_hooks_t *new_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
+
+ test_skip_if(is_background_thread_enabled());
+ test_extent_body(arena_ind);
+}
+TEST_END
+
+static void
+test_arenas_create_ext_base(arena_config_t config,
+ bool expect_hook_data, bool expect_hook_metadata)
+{
+ unsigned arena, arena1;
+ void *ptr;
+ size_t sz = sizeof(unsigned);
+
+ extent_hooks_prep();
+
+ called_alloc = false;
+ expect_d_eq(mallctl("experimental.arenas_create_ext",
+ (void *)&arena, &sz, &config, sizeof(arena_config_t)), 0,
+ "Unexpected mallctl() failure");
+ expect_b_eq(called_alloc, expect_hook_metadata,
+ "expected hook metadata alloc mismatch");
+
+ called_alloc = false;
+ ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ expect_b_eq(called_alloc, expect_hook_data,
+ "expected hook data alloc mismatch");
+
+ expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena, arena1, "Unexpected arena index");
+ dallocx(ptr, 0);
+}
+
+TEST_BEGIN(test_arenas_create_ext_with_ehooks_no_metadata) {
+ arena_config_t config;
+ config.extent_hooks = &hooks;
+ config.metadata_use_hooks = false;
+
+ test_arenas_create_ext_base(config, true, false);
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_create_ext_with_ehooks_with_metadata) {
+ arena_config_t config;
+ config.extent_hooks = &hooks;
+ config.metadata_use_hooks = true;
+
+ test_arenas_create_ext_base(config, true, true);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_extent_manual_hook,
+ test_extent_auto_hook,
+ test_arenas_create_ext_with_ehooks_no_metadata,
+ test_arenas_create_ext_with_ehooks_with_metadata);
+}
diff --git a/deps/jemalloc/test/integration/extent.sh b/deps/jemalloc/test/integration/extent.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/malloc.c b/deps/jemalloc/test/integration/malloc.c
new file mode 100644
index 0000000..ef44916
--- /dev/null
+++ b/deps/jemalloc/test/integration/malloc.c
@@ -0,0 +1,16 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_zero_alloc) {
+ void *res = malloc(0);
+ assert(res);
+ size_t usable = TEST_MALLOC_SIZE(res);
+ assert(usable > 0);
+ free(res);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_zero_alloc);
+}
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
new file mode 100644
index 0000000..fdf1e3f
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -0,0 +1,274 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ expect_ptr_null(mallocx(largemax+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(mallocx(SIZE_T_MAX, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
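+/*
+ * remote_alloc() allocates from a fresh arena in a child thread, bypassing
+ * the tcache, so that the main thread can exercise freeing memory it did not
+ * allocate itself.
+ */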
+static void *
+remote_alloc(void *arg) {
+ unsigned arena;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ size_t large_sz;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
+ | MALLOCX_TCACHE_NONE);
+ void **ret = (void **)arg;
+ *ret = ptr;
+
+ return NULL;
+}
+
+TEST_BEGIN(test_remote_free) {
+ thd_t thd;
+ void *ret;
+ thd_create(&thd, remote_alloc, (void *)&ret);
+ thd_join(thd, NULL);
+ expect_ptr_not_null(ret, "Unexpected mallocx failure");
+
+ /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+ dallocx(ret, 0);
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_oom) {
+ size_t largemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ largemax = get_large_size(get_nlarge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0));
+ if (ptrs[i] == NULL) {
+ oom = true;
+ }
+ }
+ expect_true(oom,
+ "Expected OOM during series of calls to mallocx(size=%zu, 0)",
+ largemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL) {
+ dallocx(ptrs[i], 0);
+ }
+ }
+ purge();
+
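+ /*
+ * LG_SIZEOF_PTR == 3 means 64-bit pointers; either way, the requests below
+ * ask for half the address space and must fail.
+ */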
+#if LG_SIZEOF_PTR == 3
+ expect_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)),
+ "Expected OOM for mallocx()");
+ expect_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)),
+ "Expected OOM for mallocx()");
+#else
+ expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
+ "Expected OOM for mallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
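+ /*
+ * nallocx(sz, 0) returns the usable size for sz, so stepping to
+ * nallocx(sz, 0) + 1 advances to the next size class on each iteration.
+ */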
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ size_t nsz, rsz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ expect_zu_ge(rsz, sz, "Real size smaller than expected");
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ const char *percpu_arena;
+ size_t sz = sizeof(percpu_arena);
+
+ if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+ strcmp(percpu_arena, "disabled") != 0) {
+ test_skip("test_alignment_and_size skipped: "
+ "not working with percpu arena.");
+ };
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO | MALLOCX_ARENA(0));
+ expect_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO | MALLOCX_ARENA(0));
+ expect_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ expect_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_overflow,
+ test_oom,
+ test_remote_free,
+ test_basic,
+ test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/mallocx.sh b/deps/jemalloc/test/integration/mallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c
new file mode 100644
index 0000000..ce63327
--- /dev/null
+++ b/deps/jemalloc/test/integration/overflow.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ unsigned nlextents;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+ void *p;
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nlextents - 1;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib() error");
+
+ expect_ptr_null(malloc(max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(malloc(SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ expect_ptr_null(calloc(1, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(calloc(1, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() OOM");
+ expect_ptr_null(realloc(p, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(realloc(p, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+ free(p);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+int
+main(void) {
+ return test(
+ test_overflow);
+}
diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c
new file mode 100644
index 0000000..2da0549
--- /dev/null
+++ b/deps/jemalloc/test/integration/posix_memalign.c
@@ -0,0 +1,128 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors) {
+ size_t alignment;
+ void *p;
+
+ for (alignment = 0; alignment < sizeof(void *); alignment++) {
+ expect_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment);
+ }
+
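+ /*
+ * alignment + 1 is never a power of two here, so posix_memalign() must
+ * reject it as well.
+ */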
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ expect_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors) {
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ int err;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 0;
+ size < 3 * alignment && size < (1U << 31);
+ size += ((size == 0) ? 1 :
+ (alignment >> (LG_SIZEOF_PTR-1)) - 1)) {
+ for (i = 0; i < NITER; i++) {
+ err = posix_memalign(&ps[i],
+ alignment, size);
+ if (err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += TEST_MALLOC_SIZE(ps[i]);
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
new file mode 100644
index 0000000..68b8f38
--- /dev/null
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -0,0 +1,308 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+TEST_BEGIN(test_grow_and_shrink) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 1024
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+ expect_zu_ne(szs[j], szs[j-1]+1,
+ "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ expect_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+static bool
+validate_fill(void *p, uint8_t c, size_t offset, size_t len) {
+ bool ret = false;
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ uint8_t *volatile buf = (uint8_t *)p;
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ uint8_t b = buf[offset+i];
+ if (b != c) {
+ test_fail("Allocation at %p (len=%zu) contains %#x "
+ "rather than %#x at offset %zu", p, len, b, c,
+ offset+i);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_zero) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ size_t psz, qsz, i, j;
+ size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+ for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+ size_t start_size = start_sizes[i];
+ p = mallocx(start_size, MALLOCX_ZERO);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ psz = sallocx(p, 0);
+
+ expect_false(validate_fill(p, 0, 0, psz),
+ "Expected zeroed memory");
+ memset(p, FILL_BYTE, psz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+
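+ /*
+ * Grow one byte at a time with MALLOCX_ZERO: existing bytes must keep
+ * FILL_BYTE and any newly usable tail must be zeroed.
+ */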
+ for (j = 1; j < RANGE; j++) {
+ q = rallocx(p, start_size+j, MALLOCX_ZERO);
+ expect_ptr_not_null(q, "Unexpected rallocx() error");
+ qsz = sallocx(q, 0);
+ if (q != p || qsz != psz) {
+ expect_false(validate_fill(q, FILL_BYTE, 0,
+ psz), "Expected filled memory");
+ expect_false(validate_fill(q, 0, psz, qsz-psz),
+ "Expected zeroed memory");
+ }
+ if (psz != qsz) {
+ memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+ qsz-psz);
+ psz = qsz;
+ }
+ p = q;
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+ dallocx(p, 0);
+ }
+#undef FILL_BYTE
+}
+TEST_END
+
+TEST_BEGIN(test_align) {
+ void *p, *q;
+ size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+ align = ZU(1);
+ p = mallocx(1, MALLOCX_ALIGN(align));
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+ q = rallocx(p, 1, MALLOCX_ALIGN(align));
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for align=%zu", align);
+ expect_ptr_null(
+ (void *)((uintptr_t)q & (align-1)),
+ "%p inadequately aligned for align=%zu",
+ q, align);
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_align_enum) {
+/* Span both small sizes and large sizes. */
+#define LG_MIN 12
+#define LG_MAX 15
+ for (size_t lg_align = LG_MIN; lg_align <= LG_MAX; ++lg_align) {
+ for (size_t lg_size = LG_MIN; lg_size <= LG_MAX; ++lg_size) {
+ size_t size = 1 << lg_size;
+ for (size_t lg_align_next = LG_MIN;
+ lg_align_next <= LG_MAX; ++lg_align_next) {
+ int flags = MALLOCX_LG_ALIGN(lg_align);
+ void *p = mallocx(1, flags);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx() error");
+ assert_zu_eq(nallocx(1, flags),
+ TEST_MALLOC_SIZE(p),
+ "Wrong mallocx() usable size");
+ int flags_next =
+ MALLOCX_LG_ALIGN(lg_align_next);
+ p = rallocx(p, size, flags_next);
+ assert_ptr_not_null(p,
+ "Unexpected rallocx() error");
+ expect_zu_eq(nallocx(size, flags_next),
+ TEST_MALLOC_SIZE(p),
+ "Wrong rallocx() usable size");
+ free(p);
+ }
+ }
+ }
+#undef LG_MAX
+#undef LG_MIN
+}
+TEST_END
+
+TEST_BEGIN(test_lg_align_and_zero) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ unsigned lg_align;
+ size_t sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+ lg_align = 0;
+ p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+ q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for lg_align=%u", lg_align);
+ expect_ptr_null(
+ (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+ "%p inadequately aligned for lg_align=%u", q, lg_align);
+ sz = sallocx(q, 0);
+ if ((sz << 1) <= MAX_VALIDATE) {
+ expect_false(validate_fill(q, 0, 0, sz),
+ "Expected zeroed memory");
+ } else {
+ expect_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+ "Expected zeroed memory");
+ expect_false(validate_fill(
+ (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+ 0, 0, MAX_VALIDATE), "Expected zeroed memory");
+ }
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+ void *p;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_ptr_null(rallocx(p, largemax+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+int
+main(void) {
+ return test(
+ test_grow_and_shrink,
+ test_zero,
+ test_align,
+ test_align_enum,
+ test_lg_align_and_zero,
+ test_overflow);
+}
diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c
new file mode 100644
index 0000000..ca01448
--- /dev/null
+++ b/deps/jemalloc/test/integration/sdallocx.c
@@ -0,0 +1,55 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 22)
+#define NITER 3
+
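+/*
+ * sdallocx() is sized deallocation: the caller passes the allocation size
+ * back as an efficiency hint.
+ */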
+TEST_BEGIN(test_basic) {
+ void *ptr = mallocx(64, 0);
+ sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ size_t nsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ total += nsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ sdallocx(ps[i], sz,
+ MALLOCX_ALIGN(alignment));
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_basic,
+ test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/slab_sizes.c b/deps/jemalloc/test/integration/slab_sizes.c
new file mode 100644
index 0000000..f6a66f2
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */
+
+TEST_BEGIN(test_slab_sizes) {
+ unsigned nbins;
+ size_t page;
+ size_t sizemib[4];
+ size_t slabmib[4];
+ size_t len;
+
+ len = sizeof(nbins);
+ expect_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+ "nbins mallctl failure");
+
+ len = sizeof(page);
+ expect_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
+ "page mallctl failure");
+
+ len = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
+ "bin size mallctlnametomib failure");
+
+ len = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
+ 0, "slab size mallctlnametomib failure");
+
+ size_t biggest_slab_seen = 0;
+
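+ /*
+ * slab_sizes.sh sets slab_sizes:1-4096:17|100-200:1|128-128:2, i.e.
+ * 17-page slabs for bins up to 4096 bytes, with 1-page slabs for the
+ * 100-200 byte bins and a 2-page slab for the 128 byte bin.
+ */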
+ for (unsigned i = 0; i < nbins; i++) {
+ size_t bin_size;
+ size_t slab_size;
+ len = sizeof(size_t);
+ sizemib[2] = i;
+ slabmib[2] = i;
+ expect_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
+ NULL, 0), 0, "bin size mallctlbymib failure");
+
+ len = sizeof(size_t);
+ expect_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
+ NULL, 0), 0, "slab size mallctlbymib failure");
+
+ if (bin_size < 100) {
+ /*
+ * Then we should be as close to 17 as possible. Since
+ * not all page sizes are valid (because of bitmap
+ * limitations on the number of items in a slab), we
+ * should at least make sure that the number of pages
+ * goes up.
+ */
+ expect_zu_ge(slab_size, biggest_slab_seen,
+ "Slab sizes should go up");
+ biggest_slab_seen = slab_size;
+ } else if (
+ (100 <= bin_size && bin_size < 128)
+ || (128 < bin_size && bin_size <= 200)) {
+ expect_zu_eq(slab_size, page,
+ "Forced-small slabs should be small");
+ } else if (bin_size == 128) {
+ expect_zu_eq(slab_size, 2 * page,
+ "Forced-2-page slab should be 2 pages");
+ } else if (200 < bin_size && bin_size <= 4096) {
+ expect_zu_ge(slab_size, biggest_slab_seen,
+ "Slab sizes should go up");
+ biggest_slab_seen = slab_size;
+ }
+ }
+ /*
+ * For any reasonable configuration, 17 pages should be a valid slab
+ * size for 4096-byte items.
+ */
+ expect_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_slab_sizes);
+}
diff --git a/deps/jemalloc/test/integration/slab_sizes.sh b/deps/jemalloc/test/integration/slab_sizes.sh
new file mode 100644
index 0000000..07e3db8
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Some screwy-looking slab sizes.
+export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
diff --git a/deps/jemalloc/test/integration/smallocx.c b/deps/jemalloc/test/integration/smallocx.c
new file mode 100644
index 0000000..389319b
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.c
@@ -0,0 +1,312 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/jemalloc_macros.h"
+
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+#ifndef JEMALLOC_VERSION_GID_IDENT
+ #error "JEMALLOC_VERSION_GID_IDENT not defined"
+#endif
+
+#define JOIN(x, y) x ## y
+#define JOIN2(x, y) JOIN(x, y)
+#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
+
+typedef struct {
+ void *ptr;
+ size_t size;
+} smallocx_return_t;
+
+extern smallocx_return_t
+smallocx(size_t size, int flags);
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ expect_ptr_null(smallocx(largemax+1, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
+ "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+static void *
+remote_alloc(void *arg) {
+ unsigned arena;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ size_t large_sz;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ smallocx_return_t r
+ = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ void *ptr = r.ptr;
+ expect_zu_eq(r.size,
+ nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
+ "Expected smalloc(size,flags).size == nallocx(size,flags)");
+ void **ret = (void **)arg;
+ *ret = ptr;
+
+ return NULL;
+}
+
+TEST_BEGIN(test_remote_free) {
+ thd_t thd;
+ void *ret;
+ thd_create(&thd, remote_alloc, (void *)&ret);
+ thd_join(thd, NULL);
+ expect_ptr_not_null(ret, "Unexpected smallocx failure");
+
+ /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+ dallocx(ret, 0);
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_oom) {
+ size_t largemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ largemax = get_large_size(get_nlarge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = smallocx(largemax, 0).ptr;
+ if (ptrs[i] == NULL) {
+ oom = true;
+ }
+ }
+ expect_true(oom,
+ "Expected OOM during series of calls to smallocx(size=%zu, 0)",
+ largemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL) {
+ dallocx(ptrs[i], 0);
+ }
+ }
+ purge();
+
+#if LG_SIZEOF_PTR == 3
+ expect_ptr_null(smallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
+ "Expected OOM for smallocx()");
+ expect_ptr_null(smallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)).ptr,
+ "Expected OOM for smallocx()");
+#else
+ expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
+ "Expected OOM for smallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ smallocx_return_t ret;
+ size_t nsz, rsz, smz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ ret = smallocx(sz, 0);
+ p = ret.ptr;
+ smz = ret.size;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ expect_zu_ge(rsz, sz, "Real size smaller than expected");
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+ dallocx(p, 0);
+
+ ret = smallocx(sz, 0);
+ p = ret.ptr;
+ smz = ret.size;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ expect_zu_ne(smz, 0, "Unexpected smallocx() error");
+ ret = smallocx(sz, MALLOCX_ZERO);
+ p = ret.ptr;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ const char *percpu_arena;
+ size_t sz = sizeof(percpu_arena);
+
+ if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+ strcmp(percpu_arena, "disabled") != 0) {
+ test_skip("test_alignment_and_size skipped: "
+ "not working with percpu arena.");
+ };
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, smz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ smallocx_return_t ret
+ = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
+ ps[i] = ret.ptr;
+ expect_ptr_not_null(ps[i],
+ "smallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ smz = ret.size;
+ expect_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, smz,
+ "nallocx()/smallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_overflow,
+ test_oom,
+ test_remote_free,
+ test_basic,
+ test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/smallocx.sh b/deps/jemalloc/test/integration/smallocx.sh
new file mode 100644
index 0000000..d07f10f
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c
new file mode 100644
index 0000000..4a6abf6
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_arena.c
@@ -0,0 +1,86 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
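+/*
+ * Each child thread binds itself to the arena index passed in from the main
+ * thread and then verifies that "thread.arena" reads back that same index.
+ */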
+void *
+thd_start(void *arg) {
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
+ (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
+ 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ expect_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return NULL;
+}
+
+static void
+mallctl_failure(int err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+}
+
+TEST_BEGIN(test_thread_arena) {
+ void *p;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Error in malloc()");
+
+ unsigned arena_ind, old_arena_ind;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Arena creation failure");
+
+ size_t size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size,
+ (void *)&arena_ind, sizeof(arena_ind))) != 0) {
+ mallctl_failure(err);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ expect_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+ free(p);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_thread_arena);
+}
diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c
new file mode 100644
index 0000000..d44dbe9
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,87 @@
+#include "test/jemalloc_test.h"
+
+void *
+thd_start(void *arg) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
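+ /*
+ * Each mallctl() write below installs e1 as the new setting and reports
+ * the previous setting back through e0.
+ */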
+ if (e0) {
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
new file mode 100644
index 0000000..1370854
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -0,0 +1,384 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation e.g. by heap profiling can't interpose allocations where
+ * xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void) {
+ static unsigned ind = 0;
+
+ if (ind == 0) {
+ size_t sz = sizeof(ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure creating arena");
+ }
+
+ return ind;
+}
+
+TEST_BEGIN(test_same_size) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nsmall(void) {
+ return get_nsizes_impl("arenas.nbins");
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_small_size(size_t ind) {
+ return get_size_impl("arenas.bin.0.size", ind);
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+TEST_BEGIN(test_size) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
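+ /*
+ * xallocx() resizes in place (never moving p) and returns the resulting
+ * usable size, so a failed resize shows up as an unchanged size.
+ */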
+ /* Test smallest supported size. */
+ expect_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ expect_zu_le(xallocx(p, largemax, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ expect_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ expect_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that largemax-size underflows. */
+ expect_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small) {
+ size_t small0, small1, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ expect_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ expect_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ expect_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large) {
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t smallmax, large1, large2, large3, largemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ large3 = get_large_size(3);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(large3, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ expect_zu_eq(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, smallmax, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+
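+ /*
+ * Reset the allocation to large3 before the next sub-test, falling back
+ * to rallocx() if it cannot be grown in place.
+ */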
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size decrease with non-zero extra. */
+ expect_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ expect_zu_le(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax+1, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ expect_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ expect_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size+extra overflow. */
+ expect_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len) {
+ const uint8_t *pc = (const uint8_t *)p;
+ size_t i, range0;
+ uint8_t c0;
+
+ malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len);
+ range0 = 0;
+ c0 = pc[0];
+ for (i = 0; i < len; i++) {
+ if (pc[i] != c0) {
+ malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+ range0 = i;
+ c0 = pc[i];
+ }
+ }
+ malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
+ const uint8_t *pc = (const uint8_t *)p;
+ bool err;
+ size_t i;
+
+ for (i = offset, err = false; i < offset+len; i++) {
+ if (pc[i] != c) {
+ err = true;
+ }
+ }
+
+ if (err) {
+ print_filled_extents(p, c, offset + len);
+ }
+
+ return err;
+}
+
+static void
+test_zero(size_t szmin, size_t szmax) {
+ int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ expect_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ if (xallocx(p, sz, 0, flags) != sz) {
+ p = rallocx(p, sz, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, flags);
+ if (xallocx(p, sz+1, 0, flags) != nsz) {
+ p = rallocx(p, sz+1, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ expect_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large) {
+ size_t large0, large1;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+
+ test_zero(large1, large0 * 2);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_zero_large);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.sh b/deps/jemalloc/test/integration/xallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/src/SFMT.c b/deps/jemalloc/test/src/SFMT.c
new file mode 100644
index 0000000..c05e218
--- /dev/null
+++ b/deps/jemalloc/test/src/SFMT.c
@@ -0,0 +1,719 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.c
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+#define SFMT_C_
+#include "test/jemalloc_test.h"
+#include "test/SFMT-params.h"
+
+#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(ONLY64) && !defined(BIG_ENDIAN64)
+ #if defined(__GNUC__)
+ #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
+ #endif
+#undef ONLY64
+#endif
+/*------------------------------------------------------
+ 128-bit SIMD data type for Altivec, SSE2 or standard C
+ ------------------------------------------------------*/
+#if defined(HAVE_ALTIVEC)
+/** 128-bit data structure */
+union W128_T {
+ vector unsigned int s;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#elif defined(HAVE_SSE2)
+/** 128-bit data structure */
+union W128_T {
+ __m128i si;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#else
+
+/** 128-bit data structure */
+struct W128_T {
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef struct W128_T w128_t;
+
+#endif
+
+struct sfmt_s {
+ /** the 128-bit internal state array */
+ w128_t sfmt[N];
+ /** index counter to the 32-bit internal state array */
+ int idx;
+ /** a flag: it is 0 if and only if the internal state is not yet
+ * initialized. */
+ int initialized;
+};
+
+/*--------------------------------------
+ FILE GLOBAL VARIABLES
+ internal state, index counter and flag
+ --------------------------------------*/
+
+/** a parity check vector which certifies the period of 2^{MEXP} */
+static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
+
+/*----------------
+ STATIC FUNCTIONS
+ ----------------*/
+static inline int idxof(int i);
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+static inline void rshift128(w128_t *out, w128_t const *in, int shift);
+static inline void lshift128(w128_t *out, w128_t const *in, int shift);
+#endif
+static inline void gen_rand_all(sfmt_t *ctx);
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
+static inline uint32_t func1(uint32_t x);
+static inline uint32_t func2(uint32_t x);
+static void period_certification(sfmt_t *ctx);
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+static inline void swap(w128_t *array, int size);
+#endif
+
+#if defined(HAVE_ALTIVEC)
+ #include "test/SFMT-alti.h"
+#elif defined(HAVE_SSE2)
+ #include "test/SFMT-sse2.h"
+#endif
+
+/**
+ * This function simulates a 64-bit LITTLE ENDIAN index
+ * on a BIG ENDIAN machine.
+ */
+#ifdef ONLY64
+static inline int idxof(int i) {
+ return i ^ 1;
+}
+#else
+static inline int idxof(int i) {
+ return i;
+}
+#endif
+/**
+ * This function simulates SIMD 128-bit right shift by the standard C.
+ * The 128-bit integer given in in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function simulates SIMD 128-bit left shift by the standard C.
+ * The 128-bit integer given in in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+#endif
+
+/**
+ * This function represents the recursion formula.
+ * @param r output
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#else
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#endif
+#endif
+
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < size - N; i++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j] = array[j + size - N];
+ }
+ for (; i < size; i++, j++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ ctx->sfmt[j] = array[i];
+ }
+}
+#endif
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
+static inline void swap(w128_t *array, int size) {
+ int i;
+ uint32_t x, y;
+
+ for (i = 0; i < size; i++) {
+ x = array[i].u[0];
+ y = array[i].u[2];
+ array[i].u[0] = array[i].u[1];
+ array[i].u[2] = array[i].u[3];
+ array[i].u[1] = x;
+ array[i].u[3] = y;
+ }
+}
+#endif
+/**
+ * This is a helper function used in the initialization performed
+ * by init_by_array.
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func1(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1664525UL;
+}
+
+/**
+ * This is a helper function used in the initialization performed
+ * by init_by_array.
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func2(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
+}
+
+/**
+ * This function certifies the period of 2^{MEXP}.
+ */
+static void period_certification(sfmt_t *ctx) {
+ int inner = 0;
+ int i, j;
+ uint32_t work;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ for (i = 0; i < 4; i++)
+ inner ^= psfmt32[idxof(i)] & parity[i];
+ for (i = 16; i > 0; i >>= 1)
+ inner ^= inner >> i;
+ inner &= 1;
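+    /*
+     * inner now holds the parity of the state bits selected by the parity
+     * vector; odd parity certifies the period, so the state is left
+     * unmodified.  Otherwise one selected bit is flipped below to restore
+     * the property.
+     */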
+    /* Check passed. */
+ if (inner == 1) {
+ return;
+ }
+    /* Check failed; modify the state. */
+ for (i = 0; i < 4; i++) {
+ work = 1;
+ for (j = 0; j < 32; j++) {
+ if ((work & parity[i]) != 0) {
+ psfmt32[idxof(i)] ^= work;
+ return;
+ }
+ work = work << 1;
+ }
+ }
+}
+
+/*----------------
+ PUBLIC FUNCTIONS
+ ----------------*/
+/**
+ * This function returns the identification string.
+ * The string shows the word size, the Mersenne exponent,
+ * and all parameters of this generator.
+ */
+const char *get_idstring(void) {
+ return IDSTR;
+}
+
+/**
+ * This function returns the minimum size of the array used by the \b
+ * fill_array32() function.
+ * @return minimum size of the array used by the fill_array32() function.
+ */
+int get_min_array_size32(void) {
+ return N32;
+}
+
+/**
+ * This function returns the minimum size of the array used by the \b
+ * fill_array64() function.
+ * @return minimum size of the array used by the fill_array64() function.
+ */
+int get_min_array_size64(void) {
+ return N64;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates and returns a 32-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * @return 32-bit pseudorandom number
+ */
+uint32_t gen_rand32(sfmt_t *ctx) {
+ uint32_t r;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ assert(ctx->initialized);
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+ r = psfmt32[ctx->idx++];
+ return r;
+}
+
+/* Generate a random integer in [0..limit). */
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
+ uint32_t ret, above;
+
+ above = 0xffffffffU - (0xffffffffU % limit);
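+    /*
+     * above is the largest multiple of limit representable in 32 bits;
+     * results at or beyond it are rejected so that every residue modulo
+     * limit is equally likely (no modulo bias).
+     */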
+ while (1) {
+ ret = gen_rand32(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+/**
+ * This function generates and returns a 64-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * The function gen_rand64 should not be called after gen_rand32
+ * unless the generator is reinitialized.
+ * @return 64-bit pseudorandom number
+ */
+uint64_t gen_rand64(sfmt_t *ctx) {
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ uint32_t r1, r2;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+#else
+ uint64_t r;
+ uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
+#endif
+
+ assert(ctx->initialized);
+ assert(ctx->idx % 2 == 0);
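+    /*
+     * A preceding gen_rand32 call can leave idx odd, which would misalign
+     * the 64-bit reads below; hence the restriction documented above and
+     * checked by this assertion.
+     */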
+
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ r1 = psfmt32[ctx->idx];
+ r2 = psfmt32[ctx->idx + 1];
+ ctx->idx += 2;
+ return ((uint64_t)r2 << 32) | r1;
+#else
+ r = psfmt64[ctx->idx / 2];
+ ctx->idx += 2;
+ return r;
+#endif
+}
+
+/* Generate a random integer in [0..limit). */
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
+ uint64_t ret, above;
+
+ above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
+ while (1) {
+ ret = gen_rand64(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates pseudorandom 32-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 624 and a
+ * multiple of four. Generation by this function is much faster than
+ * repeated calls to gen_rand32().
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without reinitialization.
+ *
+ * @param array an array where pseudorandom 32-bit integers are filled
+ * by this function. The pointer to the array must be \b "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 32-bit pseudorandom integers to be
+ * generated. size must be a multiple of 4, and greater than or equal
+ * to (MEXP / 128 + 1) * 4.
+ *
+ * @note \b memalign or \b posix_memalign can be used to obtain aligned
+ * memory. Mac OS X doesn't have these functions, but its \b malloc
+ * returns a pointer to a suitably aligned memory block.
+ */
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 4 == 0);
+ assert(size >= N32);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 4);
+ ctx->idx = N32;
+}
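+/*
+ * A minimal usage sketch of fill_array32 (illustrative only), assuming
+ * MEXP == 19937 so that get_min_array_size32() == 624; in a SIMD build the
+ * buffer would additionally have to be 16-byte aligned (e.g. obtained via
+ * posix_memalign):
+ *
+ *   uint32_t buf[624];
+ *   sfmt_t *s = init_gen_rand(1234);
+ *   fill_array32(s, buf, 624);
+ *   fini_gen_rand(s);
+ */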
+#endif
+
+/**
+ * This function generates pseudorandom 64-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 312 and a
+ * multiple of two. Generation by this function is much faster than
+ * repeated calls to gen_rand64().
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without reinitialization.
+ *
+ * @param array an array where pseudorandom 64-bit integers are filled
+ * by this function. The pointer to the array must be "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 64-bit pseudorandom integers to be
+ * generated. size must be a multiple of 2, and greater than or equal
+ * to (MEXP / 128 + 1) * 2.
+ *
+ * @note \b memalign or \b posix_memalign can be used to obtain aligned
+ * memory. Mac OS X doesn't have these functions, but its \b malloc
+ * returns a pointer to a suitably aligned memory block.
+ */
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 2 == 0);
+ assert(size >= N64);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 2);
+ ctx->idx = N32;
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+    swap((w128_t *)array, size / 2);
+#endif
+}
+
+/**
+ * This function initializes the internal state array with a 32-bit
+ * integer seed.
+ *
+ * @param seed a 32-bit integer used as the seed.
+ */
+sfmt_t *init_gen_rand(uint32_t seed) {
+ void *p;
+ sfmt_t *ctx;
+ int i;
+ uint32_t *psfmt32;
+
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ psfmt32[idxof(0)] = seed;
+ for (i = 1; i < N32; i++) {
+ psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
+ ^ (psfmt32[idxof(i - 1)] >> 30))
+ + i;
+ }
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+/**
+ * This function initializes the internal state array
+ * with an array of 32-bit integers used as the seeds.
+ * @param init_key the array of 32-bit integers, used as a seed.
+ * @param key_length the length of init_key.
+ */
+sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
+ void *p;
+ sfmt_t *ctx;
+ int i, j, count;
+ uint32_t r;
+ int lag;
+ int mid;
+ int size = N * 4;
+ uint32_t *psfmt32;
+
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ if (size >= 623) {
+ lag = 11;
+ } else if (size >= 68) {
+ lag = 7;
+ } else if (size >= 39) {
+ lag = 5;
+ } else {
+ lag = 3;
+ }
+ mid = (size - lag) / 2;
+
+ memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
+ if (key_length + 1 > N32) {
+ count = key_length + 1;
+ } else {
+ count = N32;
+ }
+ r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
+ ^ psfmt32[idxof(N32 - 1)]);
+ psfmt32[idxof(mid)] += r;
+ r += key_length;
+ psfmt32[idxof(mid + lag)] += r;
+ psfmt32[idxof(0)] = r;
+
+ count--;
+ for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += init_key[j] + i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (; j < count; j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (j = 0; j < N32; j++) {
+ r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ + psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] ^= r;
+ r -= i;
+ psfmt32[idxof((i + mid + lag) % N32)] ^= r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+void fini_gen_rand(sfmt_t *ctx) {
+ assert(ctx != NULL);
+
+ ctx->initialized = 0;
+ free(ctx);
+}
diff --git a/deps/jemalloc/test/src/btalloc.c b/deps/jemalloc/test/src/btalloc.c
new file mode 100644
index 0000000..d570952
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc.c
@@ -0,0 +1,6 @@
+#include "test/jemalloc_test.h"
+
+void *
+btalloc(size_t size, unsigned bits) {
+ return btalloc_0(size, bits);
+}
diff --git a/deps/jemalloc/test/src/btalloc_0.c b/deps/jemalloc/test/src/btalloc_0.c
new file mode 100644
index 0000000..77d8904
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_0.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(0)
diff --git a/deps/jemalloc/test/src/btalloc_1.c b/deps/jemalloc/test/src/btalloc_1.c
new file mode 100644
index 0000000..4c126c3
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_1.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(1)
diff --git a/deps/jemalloc/test/src/math.c b/deps/jemalloc/test/src/math.c
new file mode 100644
index 0000000..1758c67
--- /dev/null
+++ b/deps/jemalloc/test/src/math.c
@@ -0,0 +1,2 @@
+#define MATH_C_
+#include "test/jemalloc_test.h"
diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c
new file mode 100644
index 0000000..d9ce375
--- /dev/null
+++ b/deps/jemalloc/test/src/mtx.c
@@ -0,0 +1,61 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
+bool
+mtx_init(mtx_t *mtx) {
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
+ _CRT_SPINCOUNT)) {
+ return true;
+ }
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mtx->lock = OS_UNFAIR_LOCK_INIT;
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return true;
+ }
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
+ if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return true;
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ return false;
+}
+
+void
+mtx_fini(mtx_t *mtx) {
+#ifdef _WIN32
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+#else
+ pthread_mutex_destroy(&mtx->lock);
+#endif
+}
+
+void
+mtx_lock(mtx_t *mtx) {
+#ifdef _WIN32
+ EnterCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_lock(&mtx->lock);
+#else
+ pthread_mutex_lock(&mtx->lock);
+#endif
+}
+
+void
+mtx_unlock(mtx_t *mtx) {
+#ifdef _WIN32
+ LeaveCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_unlock(&mtx->lock);
+#else
+ pthread_mutex_unlock(&mtx->lock);
+#endif
+}
diff --git a/deps/jemalloc/test/src/sleep.c b/deps/jemalloc/test/src/sleep.c
new file mode 100644
index 0000000..2234b4b
--- /dev/null
+++ b/deps/jemalloc/test/src/sleep.c
@@ -0,0 +1,27 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Sleep for approximately ns nanoseconds. Neither a lower nor an upper bound
+ * on sleep time is guaranteed.
+ */
+void
+sleep_ns(unsigned ns) {
+ assert(ns <= 1000*1000*1000);
+
+#ifdef _WIN32
+ Sleep(ns / 1000 / 1000);
+#else
+ {
+ struct timespec timeout;
+
+ if (ns < 1000*1000*1000) {
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = ns;
+ } else {
+ timeout.tv_sec = 1;
+ timeout.tv_nsec = 0;
+ }
+ nanosleep(&timeout, NULL);
+ }
+#endif
+}
diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c
new file mode 100644
index 0000000..4cd803e
--- /dev/null
+++ b/deps/jemalloc/test/src/test.c
@@ -0,0 +1,234 @@
+#include "test/jemalloc_test.h"
+
+/* Test status state. */
+
+static unsigned test_count = 0;
+static test_status_t test_counts[test_status_count] = {0, 0, 0};
+static test_status_t test_status = test_status_pass;
+static const char * test_name = "";
+
+/* Reentrancy testing helpers. */
+
+#define NUM_REENTRANT_ALLOCS 20
+typedef enum {
+ non_reentrant = 0,
+ libc_reentrant = 1,
+ arena_new_reentrant = 2
+} reentrancy_t;
+static reentrancy_t reentrancy;
+
+static bool libc_hook_ran = false;
+static bool arena_new_hook_ran = false;
+
+static const char *
+reentrancy_t_str(reentrancy_t r) {
+ switch (r) {
+ case non_reentrant:
+ return "non-reentrant";
+ case libc_reentrant:
+ return "libc-reentrant";
+ case arena_new_reentrant:
+ return "arena_new-reentrant";
+ default:
+ unreachable();
+ }
+}
+
+static void
+do_hook(bool *hook_ran, void (**hook)()) {
+ *hook_ran = true;
+ *hook = NULL;
+
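+    /*
+     * The hook pointer was cleared above so these allocations do not
+     * re-trigger the hook; the doubling sizes exercise a range of size
+     * classes while the allocator is being entered reentrantly.
+     */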
+ size_t alloc_size = 1;
+ for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
+ free(malloc(alloc_size));
+ alloc_size *= 2;
+ }
+}
+
+static void
+libc_reentrancy_hook() {
+ do_hook(&libc_hook_ran, &test_hooks_libc_hook);
+}
+
+static void
+arena_new_reentrancy_hook() {
+ do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
+}
+
+/* Actual test infrastructure. */
+bool
+test_is_reentrant() {
+ return reentrancy != non_reentrant;
+}
+
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_skip(const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_skip;
+}
+
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_fail(const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_fail;
+}
+
+static const char *
+test_status_string(test_status_t current_status) {
+ switch (current_status) {
+ case test_status_pass: return "pass";
+ case test_status_skip: return "skip";
+ case test_status_fail: return "fail";
+ default: not_reached();
+ }
+}
+
+void
+p_test_init(const char *name) {
+ test_count++;
+ test_status = test_status_pass;
+ test_name = name;
+}
+
+void
+p_test_fini(void) {
+ test_counts[test_status]++;
+ malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
+ test_status_string(test_status));
+}
+
+static void
+check_global_slow(test_status_t *status) {
+#ifdef JEMALLOC_UNIT_TEST
+ /*
+ * This check needs to peek into tsd internals, which is why it's only
+ * exposed in unit tests.
+ */
+ if (tsd_global_slow()) {
+ malloc_printf("Testing increased global slow count\n");
+ *status = test_status_fail;
+ }
+#endif
+}
+
+static test_status_t
+p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
+ test_status_t ret;
+
+ if (do_malloc_init) {
+ /*
+ * Make sure initialization occurs prior to running tests.
+ * Tests are special because they may use internal facilities
+ * prior to triggering initialization as a side effect of
+ * calling into the public API.
+ */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return test_status_fail;
+ }
+ }
+
+ ret = test_status_pass;
+ for (; t != NULL; t = va_arg(ap, test_t *)) {
+ /* Non-reentrant run. */
+ reentrancy = non_reentrant;
+ test_hooks_arena_new_hook = test_hooks_libc_hook = NULL;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+ /* Reentrant run. */
+ if (do_reentrant) {
+ reentrancy = libc_reentrant;
+ test_hooks_arena_new_hook = NULL;
+ test_hooks_libc_hook = &libc_reentrancy_hook;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+
+ reentrancy = arena_new_reentrant;
+ test_hooks_libc_hook = NULL;
+ test_hooks_arena_new_hook = &arena_new_reentrancy_hook;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+ }
+ }
+
+ malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
+ test_status_string(test_status_pass),
+ test_counts[test_status_pass], test_count,
+ test_status_string(test_status_skip),
+ test_counts[test_status_skip], test_count,
+ test_status_string(test_status_fail),
+ test_counts[test_status_fail], test_count);
+
+ return ret;
+}
+
+test_status_t
+p_test(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(true, true, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+test_status_t
+p_test_no_reentrancy(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(true, false, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+test_status_t
+p_test_no_malloc_init(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ /*
+ * We also omit reentrancy from bootstrapping tests, since we don't
+ * (yet) care about general reentrancy during bootstrapping.
+ */
+ ret = p_test_impl(false, false, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+void
+p_test_fail(const char *prefix, const char *message) {
+ malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
+ test_status = test_status_fail;
+}
diff --git a/deps/jemalloc/test/src/thd.c b/deps/jemalloc/test/src/thd.c
new file mode 100644
index 0000000..9a15eab
--- /dev/null
+++ b/deps/jemalloc/test/src/thd.c
@@ -0,0 +1,34 @@
+#include "test/jemalloc_test.h"
+
+#ifdef _WIN32
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
+ LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
+ *thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
+ if (*thd == NULL) {
+ test_fail("Error in CreateThread()\n");
+ }
+}
+
+void
+thd_join(thd_t thd, void **ret) {
+ if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
+ DWORD exit_code;
+ GetExitCodeThread(thd, (LPDWORD) &exit_code);
+ *ret = (void *)(uintptr_t)exit_code;
+ }
+}
+
+#else
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
+ if (pthread_create(thd, NULL, proc, arg) != 0) {
+ test_fail("Error in pthread_create()\n");
+ }
+}
+
+void
+thd_join(thd_t thd, void **ret) {
+ pthread_join(thd, ret);
+}
+#endif
diff --git a/deps/jemalloc/test/src/timer.c b/deps/jemalloc/test/src/timer.c
new file mode 100644
index 0000000..6e8b8ed
--- /dev/null
+++ b/deps/jemalloc/test/src/timer.c
@@ -0,0 +1,55 @@
+#include "test/jemalloc_test.h"
+
+void
+timer_start(timedelta_t *timer) {
+ nstime_init_update(&timer->t0);
+}
+
+void
+timer_stop(timedelta_t *timer) {
+ nstime_copy(&timer->t1, &timer->t0);
+ nstime_update(&timer->t1);
+}
+
+uint64_t
+timer_usec(const timedelta_t *timer) {
+ nstime_t delta;
+
+ nstime_copy(&delta, &timer->t1);
+ nstime_subtract(&delta, &timer->t0);
+ return nstime_ns(&delta) / 1000;
+}
+
+void
+timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) {
+ uint64_t t0 = timer_usec(a);
+ uint64_t t1 = timer_usec(b);
+ uint64_t mult;
+ size_t i = 0;
+ size_t j, n;
+
+ /* Whole. */
+ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
+ i += n;
+ if (i >= buflen) {
+ return;
+ }
+ mult = 1;
+ for (j = 0; j < n; j++) {
+ mult *= 10;
+ }
+
+ /* Decimal. */
+ n = malloc_snprintf(&buf[i], buflen-i, ".");
+ i += n;
+
+ /* Fraction. */
+ while (i < buflen-1) {
+ uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
+ >= 5)) ? 1 : 0;
+ n = malloc_snprintf(&buf[i], buflen-i,
+ "%"FMTu64, (t0 * mult / t1) % 10 + round);
+ i += n;
+ mult *= 10;
+ }
+}
diff --git a/deps/jemalloc/test/stress/batch_alloc.c b/deps/jemalloc/test/stress/batch_alloc.c
new file mode 100644
index 0000000..427e1cb
--- /dev/null
+++ b/deps/jemalloc/test/stress/batch_alloc.c
@@ -0,0 +1,198 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+#define MIBLEN 8
+static size_t mib[MIBLEN];
+static size_t miblen = MIBLEN;
+
+#define TINY_BATCH 10
+#define TINY_BATCH_ITER (10 * 1000 * 1000)
+#define HUGE_BATCH (1000 * 1000)
+#define HUGE_BATCH_ITER 100
+#define LEN (100 * 1000 * 1000)
+static void *batch_ptrs[LEN];
+static size_t batch_ptrs_next = 0;
+static void *item_ptrs[LEN];
+static size_t item_ptrs_next = 0;
+
+#define SIZE 7
+
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static void
+batch_alloc_wrapper(size_t batch) {
+ batch_alloc_packet_t batch_alloc_packet =
+ {batch_ptrs + batch_ptrs_next, batch, SIZE, 0};
+ size_t filled;
+ size_t len = sizeof(size_t);
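+    /*
+     * The experimental.batch_alloc ctl consumes the request packet via newp
+     * and reports the number of objects actually allocated via oldp
+     * (filled); this benchmark expects the full batch to succeed.
+     */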
+ assert_d_eq(mallctlbymib(mib, miblen, &filled, &len,
+ &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
+ assert_zu_eq(filled, batch, "");
+}
+
+static void
+item_alloc_wrapper(size_t batch) {
+ for (size_t i = item_ptrs_next, end = i + batch; i < end; ++i) {
+ item_ptrs[i] = malloc(SIZE);
+ }
+}
+
+static void
+release_and_clear(void **ptrs, size_t len) {
+ for (size_t i = 0; i < len; ++i) {
+ void *p = ptrs[i];
+ assert_ptr_not_null(p, "allocation failed");
+ sdallocx(p, SIZE, 0);
+ ptrs[i] = NULL;
+ }
+}
+
+static void
+batch_alloc_without_free(size_t batch) {
+ batch_alloc_wrapper(batch);
+ batch_ptrs_next += batch;
+}
+
+static void
+item_alloc_without_free(size_t batch) {
+ item_alloc_wrapper(batch);
+ item_ptrs_next += batch;
+}
+
+static void
+batch_alloc_with_free(size_t batch) {
+ batch_alloc_wrapper(batch);
+ release_and_clear(batch_ptrs + batch_ptrs_next, batch);
+ batch_ptrs_next += batch;
+}
+
+static void
+item_alloc_with_free(size_t batch) {
+ item_alloc_wrapper(batch);
+ release_and_clear(item_ptrs + item_ptrs_next, batch);
+ item_ptrs_next += batch;
+}
+
+static void
+compare_without_free(size_t batch, size_t iter,
+ void (*batch_alloc_without_free_func)(void),
+ void (*item_alloc_without_free_func)(void)) {
+ assert(batch_ptrs_next == 0);
+ assert(item_ptrs_next == 0);
+ assert(batch * iter <= LEN);
+ for (size_t i = 0; i < iter; ++i) {
+ batch_alloc_without_free_func();
+ item_alloc_without_free_func();
+ }
+ release_and_clear(batch_ptrs, batch_ptrs_next);
+ batch_ptrs_next = 0;
+ release_and_clear(item_ptrs, item_ptrs_next);
+ item_ptrs_next = 0;
+ compare_funcs(0, iter,
+ "batch allocation", batch_alloc_without_free_func,
+ "item allocation", item_alloc_without_free_func);
+ release_and_clear(batch_ptrs, batch_ptrs_next);
+ batch_ptrs_next = 0;
+ release_and_clear(item_ptrs, item_ptrs_next);
+ item_ptrs_next = 0;
+}
+
+static void
+compare_with_free(size_t batch, size_t iter,
+ void (*batch_alloc_with_free_func)(void),
+ void (*item_alloc_with_free_func)(void)) {
+ assert(batch_ptrs_next == 0);
+ assert(item_ptrs_next == 0);
+ assert(batch * iter <= LEN);
+ for (size_t i = 0; i < iter; ++i) {
+ batch_alloc_with_free_func();
+ item_alloc_with_free_func();
+ }
+ batch_ptrs_next = 0;
+ item_ptrs_next = 0;
+ compare_funcs(0, iter,
+ "batch allocation", batch_alloc_with_free_func,
+ "item allocation", item_alloc_with_free_func);
+ batch_ptrs_next = 0;
+ item_ptrs_next = 0;
+}
+
+static void
+batch_alloc_without_free_tiny() {
+ batch_alloc_without_free(TINY_BATCH);
+}
+
+static void
+item_alloc_without_free_tiny() {
+ item_alloc_without_free(TINY_BATCH);
+}
+
+TEST_BEGIN(test_tiny_batch_without_free) {
+ compare_without_free(TINY_BATCH, TINY_BATCH_ITER,
+ batch_alloc_without_free_tiny, item_alloc_without_free_tiny);
+}
+TEST_END
+
+static void
+batch_alloc_with_free_tiny() {
+ batch_alloc_with_free(TINY_BATCH);
+}
+
+static void
+item_alloc_with_free_tiny() {
+ item_alloc_with_free(TINY_BATCH);
+}
+
+TEST_BEGIN(test_tiny_batch_with_free) {
+ compare_with_free(TINY_BATCH, TINY_BATCH_ITER,
+ batch_alloc_with_free_tiny, item_alloc_with_free_tiny);
+}
+TEST_END
+
+static void
+batch_alloc_without_free_huge() {
+ batch_alloc_without_free(HUGE_BATCH);
+}
+
+static void
+item_alloc_without_free_huge() {
+ item_alloc_without_free(HUGE_BATCH);
+}
+
+TEST_BEGIN(test_huge_batch_without_free) {
+ compare_without_free(HUGE_BATCH, HUGE_BATCH_ITER,
+ batch_alloc_without_free_huge, item_alloc_without_free_huge);
+}
+TEST_END
+
+static void
+batch_alloc_with_free_huge() {
+ batch_alloc_with_free(HUGE_BATCH);
+}
+
+static void
+item_alloc_with_free_huge() {
+ item_alloc_with_free(HUGE_BATCH);
+}
+
+TEST_BEGIN(test_huge_batch_with_free) {
+ compare_with_free(HUGE_BATCH, HUGE_BATCH_ITER,
+ batch_alloc_with_free_huge, item_alloc_with_free_huge);
+}
+TEST_END
+
+int main(void) {
+ assert_d_eq(mallctlnametomib("experimental.batch_alloc", mib, &miblen),
+ 0, "");
+ return test_no_reentrancy(
+ test_tiny_batch_without_free,
+ test_tiny_batch_with_free,
+ test_huge_batch_without_free,
+ test_huge_batch_with_free);
+}
diff --git a/deps/jemalloc/test/stress/fill_flush.c b/deps/jemalloc/test/stress/fill_flush.c
new file mode 100644
index 0000000..a2db044
--- /dev/null
+++ b/deps/jemalloc/test/stress/fill_flush.c
@@ -0,0 +1,76 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+#define SMALL_ALLOC_SIZE 128
+#define LARGE_ALLOC_SIZE SC_LARGE_MINCLASS
+#define NALLOCS 1000
+
+/*
+ * We make this volatile so the 1-at-a-time variants can't leave the allocation
+ * in a register, just to try to keep the cache behavior closer to that of
+ * the array variants.
+ */
+void *volatile allocs[NALLOCS];
+
+static void
+array_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_small) {
+ compare_funcs(1 * 1000, 10 * 1000,
+ "array of small allocations", array_alloc_dalloc_small,
+ "small item allocation", item_alloc_dalloc_small);
+}
+TEST_END
+
+static void
+array_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_large) {
+ compare_funcs(100, 1000,
+ "array of large allocations", array_alloc_dalloc_large,
+ "large item allocation", item_alloc_dalloc_large);
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_array_vs_item_small,
+ test_array_vs_item_large);
+}
diff --git a/deps/jemalloc/test/stress/hookbench.c b/deps/jemalloc/test/stress/hookbench.c
new file mode 100644
index 0000000..97e90b0
--- /dev/null
+++ b/deps/jemalloc/test/stress/hookbench.c
@@ -0,0 +1,73 @@
+#include "test/jemalloc_test.h"
+
+static void
+noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ uintptr_t result_raw, uintptr_t args_raw[3]) {
+}
+
+static void
+noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]) {
+}
+
+static void
+noop_expand_hook(void *extra, hook_expand_t type, void *address,
+ size_t old_usize, size_t new_usize, uintptr_t result_raw,
+ uintptr_t args_raw[4]) {
+}
+
+static void
+malloc_free_loop(int iters) {
+ for (int i = 0; i < iters; i++) {
+ void *p = mallocx(1, 0);
+ free(p);
+ }
+}
+
+static void
+test_hooked(int iters) {
+ hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook,
+ NULL};
+
+ int err;
+ void *handles[HOOK_MAX];
+ size_t sz = sizeof(handles[0]);
+
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.install", &handles[i],
+ &sz, &hooks, sizeof(hooks));
+ assert(err == 0);
+
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+ malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1,
+ i + 1 == 1 ? "" : "s", timer_usec(&timer));
+ }
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.remove", NULL, NULL,
+ &handles[i], sizeof(handles[i]));
+ assert(err == 0);
+ }
+}
+
+static void
+test_unhooked(int iters) {
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+
+ malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer));
+}
+
+int
+main(void) {
+ /* Initialize */
+ free(mallocx(1, 0));
+ int iters = 10 * 1000 * 1000;
+ malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
+ test_hooked(iters);
+ test_unhooked(iters);
+}
diff --git a/deps/jemalloc/test/stress/large_microbench.c b/deps/jemalloc/test/stress/large_microbench.c
new file mode 100644
index 0000000..c66b33a
--- /dev/null
+++ b/deps/jemalloc/test/stress/large_microbench.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+large_mallocx_free(void) {
+ /*
+ * We go a bit larger than the large minclass on its own to better
+ * expose costs from things like zeroing.
+ */
+ void *p = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ free(p);
+}
+
+static void
+small_mallocx_free(void) {
+ void *p = mallocx(16, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ free(p);
+}
+
+TEST_BEGIN(test_large_vs_small) {
+ compare_funcs(100*1000, 1*1000*1000, "large mallocx",
+ large_mallocx_free, "small mallocx", small_mallocx_free);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_large_vs_small);
+}
+
diff --git a/deps/jemalloc/test/stress/mallctl.c b/deps/jemalloc/test/stress/mallctl.c
new file mode 100644
index 0000000..d29b311
--- /dev/null
+++ b/deps/jemalloc/test/stress/mallctl.c
@@ -0,0 +1,74 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+mallctl_short(void) {
+ const char *version;
+ size_t sz = sizeof(version);
+ int err = mallctl("version", &version, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctl failure");
+}
+
+size_t mib_short[1];
+
+static void
+mallctlbymib_short(void) {
+ size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
+ const char *version;
+ size_t sz = sizeof(version);
+ int err = mallctlbymib(mib_short, miblen, &version, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctlbymib failure");
+}
+
+TEST_BEGIN(test_mallctl_vs_mallctlbymib_short) {
+ size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
+
+ int err = mallctlnametomib("version", mib_short, &miblen);
+ assert_d_eq(err, 0, "mallctlnametomib failure");
+ compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_short",
+ mallctl_short, "mallctlbymib_short", mallctlbymib_short);
+}
+TEST_END
+
+static void
+mallctl_long(void) {
+ uint64_t nmalloc;
+ size_t sz = sizeof(nmalloc);
+ int err = mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL,
+ 0);
+ assert_d_eq(err, 0, "mallctl failure");
+}
+
+size_t mib_long[6];
+
+static void
+mallctlbymib_long(void) {
+ size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
+ uint64_t nmalloc;
+ size_t sz = sizeof(nmalloc);
+ int err = mallctlbymib(mib_long, miblen, &nmalloc, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctlbymib failure");
+}
+
+TEST_BEGIN(test_mallctl_vs_mallctlbymib_long) {
+ /*
+     * We want to use the longest mallctl we have; that requires stats
+     * support to be enabled.
+ */
+ test_skip_if(!config_stats);
+
+ size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
+ int err = mallctlnametomib("stats.arenas.0.bins.0.nmalloc", mib_long,
+ &miblen);
+ assert_d_eq(err, 0, "mallctlnametomib failure");
+ compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_long",
+ mallctl_long, "mallctlbymib_long", mallctlbymib_long);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_mallctl_vs_mallctlbymib_short,
+ test_mallctl_vs_mallctlbymib_long);
+}
diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c
new file mode 100644
index 0000000..062e32f
--- /dev/null
+++ b/deps/jemalloc/test/stress/microbench.c
@@ -0,0 +1,126 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+malloc_free(void) {
+ /* The compiler can optimize away free(malloc(1))! */
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ free(p);
+}
+
+static void
+mallocx_free(void) {
+ void *p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_malloc_vs_mallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
+ malloc_free, "mallocx", mallocx_free);
+}
+TEST_END
+
+static void
+malloc_dallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ dallocx(p, 0);
+}
+
+static void
+malloc_sdallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ sdallocx(p, 1, 0);
+}
+
+TEST_BEGIN(test_free_vs_dallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
+ "dallocx", malloc_dallocx);
+}
+TEST_END
+
+TEST_BEGIN(test_dallocx_vs_sdallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
+ "sdallocx", malloc_sdallocx);
+}
+TEST_END
+
+static void
+malloc_mus_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ TEST_MALLOC_SIZE(p);
+ free(p);
+}
+
+static void
+malloc_sallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (sallocx(p, 0) < 1) {
+ test_fail("Unexpected sallocx() failure");
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_mus_vs_sallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
+ malloc_mus_free, "sallocx", malloc_sallocx_free);
+}
+TEST_END
+
+static void
+malloc_nallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (nallocx(1, 0) < 1) {
+ test_fail("Unexpected nallocx() failure");
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_sallocx_vs_nallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
+ malloc_sallocx_free, "nallocx", malloc_nallocx_free);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_malloc_vs_mallocx,
+ test_free_vs_dallocx,
+ test_dallocx_vs_sdallocx,
+ test_mus_vs_sallocx,
+ test_sallocx_vs_nallocx);
+}
diff --git a/deps/jemalloc/test/test.sh.in b/deps/jemalloc/test/test.sh.in
new file mode 100644
index 0000000..39302ff
--- /dev/null
+++ b/deps/jemalloc/test/test.sh.in
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+case @abi@ in
+ macho)
+ export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
+ ;;
+ pecoff)
+ export PATH="${PATH}:@objroot@lib"
+ ;;
+ *)
+ ;;
+esac
+
+# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so
+# it can be repeatedly concatenated with per-test settings.
+export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF}
+# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL.
+export_malloc_conf() {
+ if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then
+ export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}"
+ else
+ export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}"
+ fi
+}
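+# For example, if a per-test script sets MALLOC_CONF="prof:true" and the
+# harness was invoked with @JEMALLOC_CPREFIX@MALLOC_CONF="narenas:1", the test
+# runs with @JEMALLOC_CPREFIX@MALLOC_CONF="prof:true,narenas:1".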
+
+# Corresponds to test_status_t.
+pass_code=0
+skip_code=1
+fail_code=2
+
+pass_count=0
+skip_count=0
+fail_count=0
+for t in $@; do
+ if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
+ echo
+ fi
+ echo "=== ${t} ==="
+ if [ -e "@srcroot@${t}.sh" ] ; then
+ # Source the shell script corresponding to the test in a subshell and
+ # execute the test. This allows the shell script to set MALLOC_CONF, which
+ # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the
+ # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
+ enable_fill=@enable_fill@ \
+ enable_prof=@enable_prof@ \
+ . @srcroot@${t}.sh && \
+ export_malloc_conf && \
+ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
+ else
+ export MALLOC_CONF= && \
+ export_malloc_conf && \
+ $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
+ fi
+ result_code=$?
+ case ${result_code} in
+ ${pass_code})
+ pass_count=$((pass_count+1))
+ ;;
+ ${skip_code})
+ skip_count=$((skip_count+1))
+ ;;
+ ${fail_code})
+ fail_count=$((fail_count+1))
+ ;;
+ *)
+ echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2
+ echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2
+ exit 1
+ esac
+done
+
+total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
+echo
+echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
+
+if [ ${fail_count} -eq 0 ] ; then
+ exit 0
+else
+ exit 1
+fi
diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c
new file mode 100644
index 0000000..b9f85dd
--- /dev/null
+++ b/deps/jemalloc/test/unit/SFMT.c
@@ -0,0 +1,1599 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "test/jemalloc_test.h"
+
+#define BLOCK_SIZE 10000
+#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
+#define COUNT_1 1000
+#define COUNT_2 700
+
+static const uint32_t init_gen_rand_32_expected[] = {
+ 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
+ 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
+ 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
+ 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
+ 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
+ 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
+ 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
+ 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
+ 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
+ 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
+ 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
+ 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
+ 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
+ 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
+ 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
+ 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
+ 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
+ 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
+ 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
+ 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
+ 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
+ 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
+ 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
+ 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
+ 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
+ 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
+ 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
+ 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
+ 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
+ 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
+ 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
+ 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
+ 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
+ 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
+ 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
+ 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
+ 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
+ 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
+ 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
+ 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
+ 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
+ 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
+ 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
+ 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
+ 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
+ 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
+ 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
+ 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
+ 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
+ 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
+ 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
+ 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
+ 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
+ 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
+ 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
+ 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
+ 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
+ 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
+ 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
+ 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
+ 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
+ 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
+ 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
+ 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
+ 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
+ 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
+ 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
+ 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
+ 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
+ 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
+ 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
+ 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
+ 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
+ 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
+ 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
+ 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
+ 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
+ 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
+ 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
+ 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
+ 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
+ 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
+ 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
+ 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
+ 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
+ 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
+ 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
+ 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
+ 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
+ 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
+ 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
+ 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
+ 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
+ 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
+ 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
+ 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
+ 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
+ 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
+ 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
+ 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
+ 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
+ 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
+ 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
+ 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
+ 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
+ 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
+ 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
+ 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
+ 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
+ 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
+ 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
+ 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
+ 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
+ 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
+ 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
+ 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
+ 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
+ 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
+ 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
+ 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
+ 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
+ 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
+ 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
+ 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
+ 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
+ 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
+ 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
+ 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
+ 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
+ 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
+ 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
+ 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
+ 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
+ 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
+ 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
+ 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
+ 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
+ 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
+ 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
+ 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
+ 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
+ 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
+ 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
+ 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
+ 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
+ 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
+ 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
+ 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
+ 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
+ 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
+ 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
+ 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
+ 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
+ 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
+ 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
+ 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
+ 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
+ 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
+ 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
+ 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
+ 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
+ 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
+ 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
+ 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
+ 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
+ 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
+ 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
+ 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
+ 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
+ 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
+ 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
+ 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
+ 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
+ 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
+ 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
+ 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
+ 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
+ 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
+ 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
+ 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
+ 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
+ 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
+ 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
+ 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
+ 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
+ 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
+ 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
+ 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
+ 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
+ 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
+ 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
+ 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
+ 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
+ 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
+ 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
+ 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
+ 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
+ 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
+ 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
+ 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
+};
+static const uint32_t init_by_array_32_expected[] = {
+ 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
+ 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
+ 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
+ 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
+ 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
+ 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
+ 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
+ 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
+ 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
+ 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
+ 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
+ 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
+ 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
+ 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
+ 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
+ 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
+ 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
+ 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
+ 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
+ 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
+ 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
+ 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
+ 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
+ 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
+ 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
+ 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
+ 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
+ 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
+ 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
+ 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
+ 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
+ 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
+ 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
+ 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
+ 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
+ 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
+ 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
+ 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
+ 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
+ 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
+ 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
+ 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
+ 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
+ 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
+ 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
+ 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
+ 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
+ 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
+ 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
+ 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
+ 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
+ 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
+ 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
+ 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
+ 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
+ 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
+ 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
+ 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
+ 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
+ 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
+ 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
+ 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
+ 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
+ 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
+ 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
+ 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
+ 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
+ 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
+ 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
+ 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
+ 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
+ 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
+ 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
+ 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
+ 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
+ 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
+ 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
+ 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
+ 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
+ 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
+ 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
+ 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
+ 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
+ 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
+ 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
+ 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
+ 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
+ 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
+ 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
+ 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
+ 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
+ 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
+ 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
+ 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
+ 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
+ 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
+ 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
+ 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
+ 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
+ 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
+ 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
+ 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
+ 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
+ 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
+ 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
+ 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
+ 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
+ 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
+ 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
+ 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
+ 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
+ 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
+ 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
+ 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
+ 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
+ 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
+ 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
+ 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
+ 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
+ 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
+ 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
+ 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
+ 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
+ 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
+ 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
+ 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
+ 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
+ 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
+ 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
+ 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
+ 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
+ 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
+ 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
+ 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
+ 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
+ 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
+ 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
+ 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
+ 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
+ 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
+ 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
+ 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
+ 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
+ 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
+ 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
+ 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
+ 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
+ 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
+ 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
+ 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
+ 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
+ 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
+ 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
+ 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
+ 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
+ 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
+ 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
+ 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
+ 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
+ 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
+ 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
+ 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
+ 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
+ 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
+ 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
+ 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
+ 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
+ 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
+ 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
+ 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
+ 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
+ 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
+ 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
+ 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
+ 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
+ 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
+ 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
+ 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
+ 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
+ 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
+ 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
+ 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
+ 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
+ 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
+ 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
+ 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
+ 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
+ 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
+ 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
+ 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
+ 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
+ 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
+ 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
+ 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
+ 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
+ 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
+ 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
+ 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
+ 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
+ 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
+};
+static const uint64_t init_gen_rand_64_expected[] = {
+ KQU(16924766246869039260), KQU( 8201438687333352714),
+ KQU( 2265290287015001750), KQU(18397264611805473832),
+ KQU( 3375255223302384358), KQU( 6345559975416828796),
+ KQU(18229739242790328073), KQU( 7596792742098800905),
+ KQU( 255338647169685981), KQU( 2052747240048610300),
+ KQU(18328151576097299343), KQU(12472905421133796567),
+ KQU(11315245349717600863), KQU(16594110197775871209),
+ KQU(15708751964632456450), KQU(10452031272054632535),
+ KQU(11097646720811454386), KQU( 4556090668445745441),
+ KQU(17116187693090663106), KQU(14931526836144510645),
+ KQU( 9190752218020552591), KQU( 9625800285771901401),
+ KQU(13995141077659972832), KQU( 5194209094927829625),
+ KQU( 4156788379151063303), KQU( 8523452593770139494),
+ KQU(14082382103049296727), KQU( 2462601863986088483),
+ KQU( 3030583461592840678), KQU( 5221622077872827681),
+ KQU( 3084210671228981236), KQU(13956758381389953823),
+ KQU(13503889856213423831), KQU(15696904024189836170),
+ KQU( 4612584152877036206), KQU( 6231135538447867881),
+ KQU(10172457294158869468), KQU( 6452258628466708150),
+ KQU(14044432824917330221), KQU( 370168364480044279),
+ KQU(10102144686427193359), KQU( 667870489994776076),
+ KQU( 2732271956925885858), KQU(18027788905977284151),
+ KQU(15009842788582923859), KQU( 7136357960180199542),
+ KQU(15901736243475578127), KQU(16951293785352615701),
+ KQU(10551492125243691632), KQU(17668869969146434804),
+ KQU(13646002971174390445), KQU( 9804471050759613248),
+ KQU( 5511670439655935493), KQU(18103342091070400926),
+ KQU(17224512747665137533), KQU(15534627482992618168),
+ KQU( 1423813266186582647), KQU(15821176807932930024),
+ KQU( 30323369733607156), KQU(11599382494723479403),
+ KQU( 653856076586810062), KQU( 3176437395144899659),
+ KQU(14028076268147963917), KQU(16156398271809666195),
+ KQU( 3166955484848201676), KQU( 5746805620136919390),
+ KQU(17297845208891256593), KQU(11691653183226428483),
+ KQU(17900026146506981577), KQU(15387382115755971042),
+ KQU(16923567681040845943), KQU( 8039057517199388606),
+ KQU(11748409241468629263), KQU( 794358245539076095),
+ KQU(13438501964693401242), KQU(14036803236515618962),
+ KQU( 5252311215205424721), KQU(17806589612915509081),
+ KQU( 6802767092397596006), KQU(14212120431184557140),
+ KQU( 1072951366761385712), KQU(13098491780722836296),
+ KQU( 9466676828710797353), KQU(12673056849042830081),
+ KQU(12763726623645357580), KQU(16468961652999309493),
+ KQU(15305979875636438926), KQU(17444713151223449734),
+ KQU( 5692214267627883674), KQU(13049589139196151505),
+ KQU( 880115207831670745), KQU( 1776529075789695498),
+ KQU(16695225897801466485), KQU(10666901778795346845),
+ KQU( 6164389346722833869), KQU( 2863817793264300475),
+ KQU( 9464049921886304754), KQU( 3993566636740015468),
+ KQU( 9983749692528514136), KQU(16375286075057755211),
+ KQU(16042643417005440820), KQU(11445419662923489877),
+ KQU( 7999038846885158836), KQU( 6721913661721511535),
+ KQU( 5363052654139357320), KQU( 1817788761173584205),
+ KQU(13290974386445856444), KQU( 4650350818937984680),
+ KQU( 8219183528102484836), KQU( 1569862923500819899),
+ KQU( 4189359732136641860), KQU(14202822961683148583),
+ KQU( 4457498315309429058), KQU(13089067387019074834),
+ KQU(11075517153328927293), KQU(10277016248336668389),
+ KQU( 7070509725324401122), KQU(17808892017780289380),
+ KQU(13143367339909287349), KQU( 1377743745360085151),
+ KQU( 5749341807421286485), KQU(14832814616770931325),
+ KQU( 7688820635324359492), KQU(10960474011539770045),
+ KQU( 81970066653179790), KQU(12619476072607878022),
+ KQU( 4419566616271201744), KQU(15147917311750568503),
+ KQU( 5549739182852706345), KQU( 7308198397975204770),
+ KQU(13580425496671289278), KQU(17070764785210130301),
+ KQU( 8202832846285604405), KQU( 6873046287640887249),
+ KQU( 6927424434308206114), KQU( 6139014645937224874),
+ KQU(10290373645978487639), KQU(15904261291701523804),
+ KQU( 9628743442057826883), KQU(18383429096255546714),
+ KQU( 4977413265753686967), KQU( 7714317492425012869),
+ KQU( 9025232586309926193), KQU(14627338359776709107),
+ KQU(14759849896467790763), KQU(10931129435864423252),
+ KQU( 4588456988775014359), KQU(10699388531797056724),
+ KQU( 468652268869238792), KQU( 5755943035328078086),
+ KQU( 2102437379988580216), KQU( 9986312786506674028),
+ KQU( 2654207180040945604), KQU( 8726634790559960062),
+ KQU( 100497234871808137), KQU( 2800137176951425819),
+ KQU( 6076627612918553487), KQU( 5780186919186152796),
+ KQU( 8179183595769929098), KQU( 6009426283716221169),
+ KQU( 2796662551397449358), KQU( 1756961367041986764),
+ KQU( 6972897917355606205), KQU(14524774345368968243),
+ KQU( 2773529684745706940), KQU( 4853632376213075959),
+ KQU( 4198177923731358102), KQU( 8271224913084139776),
+ KQU( 2741753121611092226), KQU(16782366145996731181),
+ KQU(15426125238972640790), KQU(13595497100671260342),
+ KQU( 3173531022836259898), KQU( 6573264560319511662),
+ KQU(18041111951511157441), KQU( 2351433581833135952),
+ KQU( 3113255578908173487), KQU( 1739371330877858784),
+ KQU(16046126562789165480), KQU( 8072101652214192925),
+ KQU(15267091584090664910), KQU( 9309579200403648940),
+ KQU( 5218892439752408722), KQU(14492477246004337115),
+ KQU(17431037586679770619), KQU( 7385248135963250480),
+ KQU( 9580144956565560660), KQU( 4919546228040008720),
+ KQU(15261542469145035584), KQU(18233297270822253102),
+ KQU( 5453248417992302857), KQU( 9309519155931460285),
+ KQU(10342813012345291756), KQU(15676085186784762381),
+ KQU(15912092950691300645), KQU( 9371053121499003195),
+ KQU( 9897186478226866746), KQU(14061858287188196327),
+ KQU( 122575971620788119), KQU(12146750969116317754),
+ KQU( 4438317272813245201), KQU( 8332576791009527119),
+ KQU(13907785691786542057), KQU(10374194887283287467),
+ KQU( 2098798755649059566), KQU( 3416235197748288894),
+ KQU( 8688269957320773484), KQU( 7503964602397371571),
+ KQU(16724977015147478236), KQU( 9461512855439858184),
+ KQU(13259049744534534727), KQU( 3583094952542899294),
+ KQU( 8764245731305528292), KQU(13240823595462088985),
+ KQU(13716141617617910448), KQU(18114969519935960955),
+ KQU( 2297553615798302206), KQU( 4585521442944663362),
+ KQU(17776858680630198686), KQU( 4685873229192163363),
+ KQU( 152558080671135627), KQU(15424900540842670088),
+ KQU(13229630297130024108), KQU(17530268788245718717),
+ KQU(16675633913065714144), KQU( 3158912717897568068),
+ KQU(15399132185380087288), KQU( 7401418744515677872),
+ KQU(13135412922344398535), KQU( 6385314346100509511),
+ KQU(13962867001134161139), KQU(10272780155442671999),
+ KQU(12894856086597769142), KQU(13340877795287554994),
+ KQU(12913630602094607396), KQU(12543167911119793857),
+ KQU(17343570372251873096), KQU(10959487764494150545),
+ KQU( 6966737953093821128), KQU(13780699135496988601),
+ KQU( 4405070719380142046), KQU(14923788365607284982),
+ KQU( 2869487678905148380), KQU( 6416272754197188403),
+ KQU(15017380475943612591), KQU( 1995636220918429487),
+ KQU( 3402016804620122716), KQU(15800188663407057080),
+ KQU(11362369990390932882), KQU(15262183501637986147),
+ KQU(10239175385387371494), KQU( 9352042420365748334),
+ KQU( 1682457034285119875), KQU( 1724710651376289644),
+ KQU( 2038157098893817966), KQU( 9897825558324608773),
+ KQU( 1477666236519164736), KQU(16835397314511233640),
+ KQU(10370866327005346508), KQU(10157504370660621982),
+ KQU(12113904045335882069), KQU(13326444439742783008),
+ KQU(11302769043000765804), KQU(13594979923955228484),
+ KQU(11779351762613475968), KQU( 3786101619539298383),
+ KQU( 8021122969180846063), KQU(15745904401162500495),
+ KQU(10762168465993897267), KQU(13552058957896319026),
+ KQU(11200228655252462013), KQU( 5035370357337441226),
+ KQU( 7593918984545500013), KQU( 5418554918361528700),
+ KQU( 4858270799405446371), KQU( 9974659566876282544),
+ KQU(18227595922273957859), KQU( 2772778443635656220),
+ KQU(14285143053182085385), KQU( 9939700992429600469),
+ KQU(12756185904545598068), KQU( 2020783375367345262),
+ KQU( 57026775058331227), KQU( 950827867930065454),
+ KQU( 6602279670145371217), KQU( 2291171535443566929),
+ KQU( 5832380724425010313), KQU( 1220343904715982285),
+ KQU(17045542598598037633), KQU(15460481779702820971),
+ KQU(13948388779949365130), KQU(13975040175430829518),
+ KQU(17477538238425541763), KQU(11104663041851745725),
+ KQU(15860992957141157587), KQU(14529434633012950138),
+ KQU( 2504838019075394203), KQU( 7512113882611121886),
+ KQU( 4859973559980886617), KQU( 1258601555703250219),
+ KQU(15594548157514316394), KQU( 4516730171963773048),
+ KQU(11380103193905031983), KQU( 6809282239982353344),
+ KQU(18045256930420065002), KQU( 2453702683108791859),
+ KQU( 977214582986981460), KQU( 2006410402232713466),
+ KQU( 6192236267216378358), KQU( 3429468402195675253),
+ KQU(18146933153017348921), KQU(17369978576367231139),
+ KQU( 1246940717230386603), KQU(11335758870083327110),
+ KQU(14166488801730353682), KQU( 9008573127269635732),
+ KQU(10776025389820643815), KQU(15087605441903942962),
+ KQU( 1359542462712147922), KQU(13898874411226454206),
+ KQU(17911176066536804411), KQU( 9435590428600085274),
+ KQU( 294488509967864007), KQU( 8890111397567922046),
+ KQU( 7987823476034328778), KQU(13263827582440967651),
+ KQU( 7503774813106751573), KQU(14974747296185646837),
+ KQU( 8504765037032103375), KQU(17340303357444536213),
+ KQU( 7704610912964485743), KQU( 8107533670327205061),
+ KQU( 9062969835083315985), KQU(16968963142126734184),
+ KQU(12958041214190810180), KQU( 2720170147759570200),
+ KQU( 2986358963942189566), KQU(14884226322219356580),
+ KQU( 286224325144368520), KQU(11313800433154279797),
+ KQU(18366849528439673248), KQU(17899725929482368789),
+ KQU( 3730004284609106799), KQU( 1654474302052767205),
+ KQU( 5006698007047077032), KQU( 8196893913601182838),
+ KQU(15214541774425211640), KQU(17391346045606626073),
+ KQU( 8369003584076969089), KQU( 3939046733368550293),
+ KQU(10178639720308707785), KQU( 2180248669304388697),
+ KQU( 62894391300126322), KQU( 9205708961736223191),
+ KQU( 6837431058165360438), KQU( 3150743890848308214),
+ KQU(17849330658111464583), KQU(12214815643135450865),
+ KQU(13410713840519603402), KQU( 3200778126692046802),
+ KQU(13354780043041779313), KQU( 800850022756886036),
+ KQU(15660052933953067433), KQU( 6572823544154375676),
+ KQU(11030281857015819266), KQU(12682241941471433835),
+ KQU(11654136407300274693), KQU( 4517795492388641109),
+ KQU( 9757017371504524244), KQU(17833043400781889277),
+ KQU(12685085201747792227), KQU(10408057728835019573),
+ KQU( 98370418513455221), KQU( 6732663555696848598),
+ KQU(13248530959948529780), KQU( 3530441401230622826),
+ KQU(18188251992895660615), KQU( 1847918354186383756),
+ KQU( 1127392190402660921), KQU(11293734643143819463),
+ KQU( 3015506344578682982), KQU(13852645444071153329),
+ KQU( 2121359659091349142), KQU( 1294604376116677694),
+ KQU( 5616576231286352318), KQU( 7112502442954235625),
+ KQU(11676228199551561689), KQU(12925182803007305359),
+ KQU( 7852375518160493082), KQU( 1136513130539296154),
+ KQU( 5636923900916593195), KQU( 3221077517612607747),
+ KQU(17784790465798152513), KQU( 3554210049056995938),
+ KQU(17476839685878225874), KQU( 3206836372585575732),
+ KQU( 2765333945644823430), KQU(10080070903718799528),
+ KQU( 5412370818878286353), KQU( 9689685887726257728),
+ KQU( 8236117509123533998), KQU( 1951139137165040214),
+ KQU( 4492205209227980349), KQU(16541291230861602967),
+ KQU( 1424371548301437940), KQU( 9117562079669206794),
+ KQU(14374681563251691625), KQU(13873164030199921303),
+ KQU( 6680317946770936731), KQU(15586334026918276214),
+ KQU(10896213950976109802), KQU( 9506261949596413689),
+ KQU( 9903949574308040616), KQU( 6038397344557204470),
+ KQU( 174601465422373648), KQU(15946141191338238030),
+ KQU(17142225620992044937), KQU( 7552030283784477064),
+ KQU( 2947372384532947997), KQU( 510797021688197711),
+ KQU( 4962499439249363461), KQU( 23770320158385357),
+ KQU( 959774499105138124), KQU( 1468396011518788276),
+ KQU( 2015698006852312308), KQU( 4149400718489980136),
+ KQU( 5992916099522371188), KQU(10819182935265531076),
+ KQU(16189787999192351131), KQU( 342833961790261950),
+ KQU(12470830319550495336), KQU(18128495041912812501),
+ KQU( 1193600899723524337), KQU( 9056793666590079770),
+ KQU( 2154021227041669041), KQU( 4963570213951235735),
+ KQU( 4865075960209211409), KQU( 2097724599039942963),
+ KQU( 2024080278583179845), KQU(11527054549196576736),
+ KQU(10650256084182390252), KQU( 4808408648695766755),
+ KQU( 1642839215013788844), KQU(10607187948250398390),
+ KQU( 7076868166085913508), KQU( 730522571106887032),
+ KQU(12500579240208524895), KQU( 4484390097311355324),
+ KQU(15145801330700623870), KQU( 8055827661392944028),
+ KQU( 5865092976832712268), KQU(15159212508053625143),
+ KQU( 3560964582876483341), KQU( 4070052741344438280),
+ KQU( 6032585709886855634), KQU(15643262320904604873),
+ KQU( 2565119772293371111), KQU( 318314293065348260),
+ KQU(15047458749141511872), KQU( 7772788389811528730),
+ KQU( 7081187494343801976), KQU( 6465136009467253947),
+ KQU(10425940692543362069), KQU( 554608190318339115),
+ KQU(14796699860302125214), KQU( 1638153134431111443),
+ KQU(10336967447052276248), KQU( 8412308070396592958),
+ KQU( 4004557277152051226), KQU( 8143598997278774834),
+ KQU(16413323996508783221), KQU(13139418758033994949),
+ KQU( 9772709138335006667), KQU( 2818167159287157659),
+ KQU(17091740573832523669), KQU(14629199013130751608),
+ KQU(18268322711500338185), KQU( 8290963415675493063),
+ KQU( 8830864907452542588), KQU( 1614839084637494849),
+ KQU(14855358500870422231), KQU( 3472996748392519937),
+ KQU(15317151166268877716), KQU( 5825895018698400362),
+ KQU(16730208429367544129), KQU(10481156578141202800),
+ KQU( 4746166512382823750), KQU(12720876014472464998),
+ KQU( 8825177124486735972), KQU(13733447296837467838),
+ KQU( 6412293741681359625), KQU( 8313213138756135033),
+ KQU(11421481194803712517), KQU( 7997007691544174032),
+ KQU( 6812963847917605930), KQU( 9683091901227558641),
+ KQU(14703594165860324713), KQU( 1775476144519618309),
+ KQU( 2724283288516469519), KQU( 717642555185856868),
+ KQU( 8736402192215092346), KQU(11878800336431381021),
+ KQU( 4348816066017061293), KQU( 6115112756583631307),
+ KQU( 9176597239667142976), KQU(12615622714894259204),
+ KQU(10283406711301385987), KQU( 5111762509485379420),
+ KQU( 3118290051198688449), KQU( 7345123071632232145),
+ KQU( 9176423451688682359), KQU( 4843865456157868971),
+ KQU(12008036363752566088), KQU(12058837181919397720),
+ KQU( 2145073958457347366), KQU( 1526504881672818067),
+ KQU( 3488830105567134848), KQU(13208362960674805143),
+ KQU( 4077549672899572192), KQU( 7770995684693818365),
+ KQU( 1398532341546313593), KQU(12711859908703927840),
+ KQU( 1417561172594446813), KQU(17045191024194170604),
+ KQU( 4101933177604931713), KQU(14708428834203480320),
+ KQU(17447509264469407724), KQU(14314821973983434255),
+ KQU(17990472271061617265), KQU( 5087756685841673942),
+ KQU(12797820586893859939), KQU( 1778128952671092879),
+ KQU( 3535918530508665898), KQU( 9035729701042481301),
+ KQU(14808661568277079962), KQU(14587345077537747914),
+ KQU(11920080002323122708), KQU( 6426515805197278753),
+ KQU( 3295612216725984831), KQU(11040722532100876120),
+ KQU(12305952936387598754), KQU(16097391899742004253),
+ KQU( 4908537335606182208), KQU(12446674552196795504),
+ KQU(16010497855816895177), KQU( 9194378874788615551),
+ KQU( 3382957529567613384), KQU( 5154647600754974077),
+ KQU( 9801822865328396141), KQU( 9023662173919288143),
+ KQU(17623115353825147868), KQU( 8238115767443015816),
+ KQU(15811444159859002560), KQU( 9085612528904059661),
+ KQU( 6888601089398614254), KQU( 258252992894160189),
+ KQU( 6704363880792428622), KQU( 6114966032147235763),
+ KQU(11075393882690261875), KQU( 8797664238933620407),
+ KQU( 5901892006476726920), KQU( 5309780159285518958),
+ KQU(14940808387240817367), KQU(14642032021449656698),
+ KQU( 9808256672068504139), KQU( 3670135111380607658),
+ KQU(11211211097845960152), KQU( 1474304506716695808),
+ KQU(15843166204506876239), KQU( 7661051252471780561),
+ KQU(10170905502249418476), KQU( 7801416045582028589),
+ KQU( 2763981484737053050), KQU( 9491377905499253054),
+ KQU(16201395896336915095), KQU( 9256513756442782198),
+ KQU( 5411283157972456034), KQU( 5059433122288321676),
+ KQU( 4327408006721123357), KQU( 9278544078834433377),
+ KQU( 7601527110882281612), KQU(11848295896975505251),
+ KQU(12096998801094735560), KQU(14773480339823506413),
+ KQU(15586227433895802149), KQU(12786541257830242872),
+ KQU( 6904692985140503067), KQU( 5309011515263103959),
+ KQU(12105257191179371066), KQU(14654380212442225037),
+ KQU( 2556774974190695009), KQU( 4461297399927600261),
+ KQU(14888225660915118646), KQU(14915459341148291824),
+ KQU( 2738802166252327631), KQU( 6047155789239131512),
+ KQU(12920545353217010338), KQU(10697617257007840205),
+ KQU( 2751585253158203504), KQU(13252729159780047496),
+ KQU(14700326134672815469), KQU(14082527904374600529),
+ KQU(16852962273496542070), KQU(17446675504235853907),
+ KQU(15019600398527572311), KQU(12312781346344081551),
+ KQU(14524667935039810450), KQU( 5634005663377195738),
+ KQU(11375574739525000569), KQU( 2423665396433260040),
+ KQU( 5222836914796015410), KQU( 4397666386492647387),
+ KQU( 4619294441691707638), KQU( 665088602354770716),
+ KQU(13246495665281593610), KQU( 6564144270549729409),
+ KQU(10223216188145661688), KQU( 3961556907299230585),
+ KQU(11543262515492439914), KQU(16118031437285993790),
+ KQU( 7143417964520166465), KQU(13295053515909486772),
+ KQU( 40434666004899675), KQU(17127804194038347164),
+ KQU( 8599165966560586269), KQU( 8214016749011284903),
+ KQU(13725130352140465239), KQU( 5467254474431726291),
+ KQU( 7748584297438219877), KQU(16933551114829772472),
+ KQU( 2169618439506799400), KQU( 2169787627665113463),
+ KQU(17314493571267943764), KQU(18053575102911354912),
+ KQU(11928303275378476973), KQU(11593850925061715550),
+ KQU(17782269923473589362), KQU( 3280235307704747039),
+ KQU( 6145343578598685149), KQU(17080117031114086090),
+ KQU(18066839902983594755), KQU( 6517508430331020706),
+ KQU( 8092908893950411541), KQU(12558378233386153732),
+ KQU( 4476532167973132976), KQU(16081642430367025016),
+ KQU( 4233154094369139361), KQU( 8693630486693161027),
+ KQU(11244959343027742285), KQU(12273503967768513508),
+ KQU(14108978636385284876), KQU( 7242414665378826984),
+ KQU( 6561316938846562432), KQU( 8601038474994665795),
+ KQU(17532942353612365904), KQU(17940076637020912186),
+ KQU( 7340260368823171304), KQU( 7061807613916067905),
+ KQU(10561734935039519326), KQU(17990796503724650862),
+ KQU( 6208732943911827159), KQU( 359077562804090617),
+ KQU(14177751537784403113), KQU(10659599444915362902),
+ KQU(15081727220615085833), KQU(13417573895659757486),
+ KQU(15513842342017811524), KQU(11814141516204288231),
+ KQU( 1827312513875101814), KQU( 2804611699894603103),
+ KQU(17116500469975602763), KQU(12270191815211952087),
+ KQU(12256358467786024988), KQU(18435021722453971267),
+ KQU( 671330264390865618), KQU( 476504300460286050),
+ KQU(16465470901027093441), KQU( 4047724406247136402),
+ KQU( 1322305451411883346), KQU( 1388308688834322280),
+ KQU( 7303989085269758176), KQU( 9323792664765233642),
+ KQU( 4542762575316368936), KQU(17342696132794337618),
+ KQU( 4588025054768498379), KQU(13415475057390330804),
+ KQU(17880279491733405570), KQU(10610553400618620353),
+ KQU( 3180842072658960139), KQU(13002966655454270120),
+ KQU( 1665301181064982826), KQU( 7083673946791258979),
+ KQU( 190522247122496820), KQU(17388280237250677740),
+ KQU( 8430770379923642945), KQU(12987180971921668584),
+ KQU( 2311086108365390642), KQU( 2870984383579822345),
+ KQU(14014682609164653318), KQU(14467187293062251484),
+ KQU( 192186361147413298), KQU(15171951713531796524),
+ KQU( 9900305495015948728), KQU(17958004775615466344),
+ KQU(14346380954498606514), KQU(18040047357617407096),
+ KQU( 5035237584833424532), KQU(15089555460613972287),
+ KQU( 4131411873749729831), KQU( 1329013581168250330),
+ KQU(10095353333051193949), KQU(10749518561022462716),
+ KQU( 9050611429810755847), KQU(15022028840236655649),
+ KQU( 8775554279239748298), KQU(13105754025489230502),
+ KQU(15471300118574167585), KQU( 89864764002355628),
+ KQU( 8776416323420466637), KQU( 5280258630612040891),
+ KQU( 2719174488591862912), KQU( 7599309137399661994),
+ KQU(15012887256778039979), KQU(14062981725630928925),
+ KQU(12038536286991689603), KQU( 7089756544681775245),
+ KQU(10376661532744718039), KQU( 1265198725901533130),
+ KQU(13807996727081142408), KQU( 2935019626765036403),
+ KQU( 7651672460680700141), KQU( 3644093016200370795),
+ KQU( 2840982578090080674), KQU(17956262740157449201),
+ KQU(18267979450492880548), KQU(11799503659796848070),
+ KQU( 9942537025669672388), KQU(11886606816406990297),
+ KQU( 5488594946437447576), KQU( 7226714353282744302),
+ KQU( 3784851653123877043), KQU( 878018453244803041),
+ KQU(12110022586268616085), KQU( 734072179404675123),
+ KQU(11869573627998248542), KQU( 469150421297783998),
+ KQU( 260151124912803804), KQU(11639179410120968649),
+ KQU( 9318165193840846253), KQU(12795671722734758075),
+ KQU(15318410297267253933), KQU( 691524703570062620),
+ KQU( 5837129010576994601), KQU(15045963859726941052),
+ KQU( 5850056944932238169), KQU(12017434144750943807),
+ KQU( 7447139064928956574), KQU( 3101711812658245019),
+ KQU(16052940704474982954), KQU(18195745945986994042),
+ KQU( 8932252132785575659), KQU(13390817488106794834),
+ KQU(11582771836502517453), KQU( 4964411326683611686),
+ KQU( 2195093981702694011), KQU(14145229538389675669),
+ KQU(16459605532062271798), KQU( 866316924816482864),
+ KQU( 4593041209937286377), KQU( 8415491391910972138),
+ KQU( 4171236715600528969), KQU(16637569303336782889),
+ KQU( 2002011073439212680), KQU(17695124661097601411),
+ KQU( 4627687053598611702), KQU( 7895831936020190403),
+ KQU( 8455951300917267802), KQU( 2923861649108534854),
+ KQU( 8344557563927786255), KQU( 6408671940373352556),
+ KQU(12210227354536675772), KQU(14294804157294222295),
+ KQU(10103022425071085127), KQU(10092959489504123771),
+ KQU( 6554774405376736268), KQU(12629917718410641774),
+ KQU( 6260933257596067126), KQU( 2460827021439369673),
+ KQU( 2541962996717103668), KQU( 597377203127351475),
+ KQU( 5316984203117315309), KQU( 4811211393563241961),
+ KQU(13119698597255811641), KQU( 8048691512862388981),
+ KQU(10216818971194073842), KQU( 4612229970165291764),
+ KQU(10000980798419974770), KQU( 6877640812402540687),
+ KQU( 1488727563290436992), KQU( 2227774069895697318),
+ KQU(11237754507523316593), KQU(13478948605382290972),
+ KQU( 1963583846976858124), KQU( 5512309205269276457),
+ KQU( 3972770164717652347), KQU( 3841751276198975037),
+ KQU(10283343042181903117), KQU( 8564001259792872199),
+ KQU(16472187244722489221), KQU( 8953493499268945921),
+ KQU( 3518747340357279580), KQU( 4003157546223963073),
+ KQU( 3270305958289814590), KQU( 3966704458129482496),
+ KQU( 8122141865926661939), KQU(14627734748099506653),
+ KQU(13064426990862560568), KQU( 2414079187889870829),
+ KQU( 5378461209354225306), KQU(10841985740128255566),
+ KQU( 538582442885401738), KQU( 7535089183482905946),
+ KQU(16117559957598879095), KQU( 8477890721414539741),
+ KQU( 1459127491209533386), KQU(17035126360733620462),
+ KQU( 8517668552872379126), KQU(10292151468337355014),
+ KQU(17081267732745344157), KQU(13751455337946087178),
+ KQU(14026945459523832966), KQU( 6653278775061723516),
+ KQU(10619085543856390441), KQU( 2196343631481122885),
+ KQU(10045966074702826136), KQU(10082317330452718282),
+ KQU( 5920859259504831242), KQU( 9951879073426540617),
+ KQU( 7074696649151414158), KQU(15808193543879464318),
+ KQU( 7385247772746953374), KQU( 3192003544283864292),
+ KQU(18153684490917593847), KQU(12423498260668568905),
+ KQU(10957758099756378169), KQU(11488762179911016040),
+ KQU( 2099931186465333782), KQU(11180979581250294432),
+ KQU( 8098916250668367933), KQU( 3529200436790763465),
+ KQU(12988418908674681745), KQU( 6147567275954808580),
+ KQU( 3207503344604030989), KQU(10761592604898615360),
+ KQU( 229854861031893504), KQU( 8809853962667144291),
+ KQU(13957364469005693860), KQU( 7634287665224495886),
+ KQU(12353487366976556874), KQU( 1134423796317152034),
+ KQU( 2088992471334107068), KQU( 7393372127190799698),
+ KQU( 1845367839871058391), KQU( 207922563987322884),
+ KQU(11960870813159944976), KQU(12182120053317317363),
+ KQU(17307358132571709283), KQU(13871081155552824936),
+ KQU(18304446751741566262), KQU( 7178705220184302849),
+ KQU(10929605677758824425), KQU(16446976977835806844),
+ KQU(13723874412159769044), KQU( 6942854352100915216),
+ KQU( 1726308474365729390), KQU( 2150078766445323155),
+ KQU(15345558947919656626), KQU(12145453828874527201),
+ KQU( 2054448620739726849), KQU( 2740102003352628137),
+ KQU(11294462163577610655), KQU( 756164283387413743),
+ KQU(17841144758438810880), KQU(10802406021185415861),
+ KQU( 8716455530476737846), KQU( 6321788834517649606),
+ KQU(14681322910577468426), KQU(17330043563884336387),
+ KQU(12701802180050071614), KQU(14695105111079727151),
+ KQU( 5112098511654172830), KQU( 4957505496794139973),
+ KQU( 8270979451952045982), KQU(12307685939199120969),
+ KQU(12425799408953443032), KQU( 8376410143634796588),
+ KQU(16621778679680060464), KQU( 3580497854566660073),
+ KQU( 1122515747803382416), KQU( 857664980960597599),
+ KQU( 6343640119895925918), KQU(12878473260854462891),
+ KQU(10036813920765722626), KQU(14451335468363173812),
+ KQU( 5476809692401102807), KQU(16442255173514366342),
+ KQU(13060203194757167104), KQU(14354124071243177715),
+ KQU(15961249405696125227), KQU(13703893649690872584),
+ KQU( 363907326340340064), KQU( 6247455540491754842),
+ KQU(12242249332757832361), KQU( 156065475679796717),
+ KQU( 9351116235749732355), KQU( 4590350628677701405),
+ KQU( 1671195940982350389), KQU(13501398458898451905),
+ KQU( 6526341991225002255), KQU( 1689782913778157592),
+ KQU( 7439222350869010334), KQU(13975150263226478308),
+ KQU(11411961169932682710), KQU(17204271834833847277),
+ KQU( 541534742544435367), KQU( 6591191931218949684),
+ KQU( 2645454775478232486), KQU( 4322857481256485321),
+ KQU( 8477416487553065110), KQU(12902505428548435048),
+ KQU( 971445777981341415), KQU(14995104682744976712),
+ KQU( 4243341648807158063), KQU( 8695061252721927661),
+ KQU( 5028202003270177222), KQU( 2289257340915567840),
+ KQU(13870416345121866007), KQU(13994481698072092233),
+ KQU( 6912785400753196481), KQU( 2278309315841980139),
+ KQU( 4329765449648304839), KQU( 5963108095785485298),
+ KQU( 4880024847478722478), KQU(16015608779890240947),
+ KQU( 1866679034261393544), KQU( 914821179919731519),
+ KQU( 9643404035648760131), KQU( 2418114953615593915),
+ KQU( 944756836073702374), KQU(15186388048737296834),
+ KQU( 7723355336128442206), KQU( 7500747479679599691),
+ KQU(18013961306453293634), KQU( 2315274808095756456),
+ KQU(13655308255424029566), KQU(17203800273561677098),
+ KQU( 1382158694422087756), KQU( 5090390250309588976),
+ KQU( 517170818384213989), KQU( 1612709252627729621),
+ KQU( 1330118955572449606), KQU( 300922478056709885),
+ KQU(18115693291289091987), KQU(13491407109725238321),
+ KQU(15293714633593827320), KQU( 5151539373053314504),
+ KQU( 5951523243743139207), KQU(14459112015249527975),
+ KQU( 5456113959000700739), KQU( 3877918438464873016),
+ KQU(12534071654260163555), KQU(15871678376893555041),
+ KQU(11005484805712025549), KQU(16353066973143374252),
+ KQU( 4358331472063256685), KQU( 8268349332210859288),
+ KQU(12485161590939658075), KQU(13955993592854471343),
+ KQU( 5911446886848367039), KQU(14925834086813706974),
+ KQU( 6590362597857994805), KQU( 1280544923533661875),
+ KQU( 1637756018947988164), KQU( 4734090064512686329),
+ KQU(16693705263131485912), KQU( 6834882340494360958),
+ KQU( 8120732176159658505), KQU( 2244371958905329346),
+ KQU(10447499707729734021), KQU( 7318742361446942194),
+ KQU( 8032857516355555296), KQU(14023605983059313116),
+ KQU( 1032336061815461376), KQU( 9840995337876562612),
+ KQU( 9869256223029203587), KQU(12227975697177267636),
+ KQU(12728115115844186033), KQU( 7752058479783205470),
+ KQU( 729733219713393087), KQU(12954017801239007622)
+};
+static const uint64_t init_by_array_64_expected[] = {
+ KQU( 2100341266307895239), KQU( 8344256300489757943),
+ KQU(15687933285484243894), KQU( 8268620370277076319),
+ KQU(12371852309826545459), KQU( 8800491541730110238),
+ KQU(18113268950100835773), KQU( 2886823658884438119),
+ KQU( 3293667307248180724), KQU( 9307928143300172731),
+ KQU( 7688082017574293629), KQU( 900986224735166665),
+ KQU( 9977972710722265039), KQU( 6008205004994830552),
+ KQU( 546909104521689292), KQU( 7428471521869107594),
+ KQU(14777563419314721179), KQU(16116143076567350053),
+ KQU( 5322685342003142329), KQU( 4200427048445863473),
+ KQU( 4693092150132559146), KQU(13671425863759338582),
+ KQU( 6747117460737639916), KQU( 4732666080236551150),
+ KQU( 5912839950611941263), KQU( 3903717554504704909),
+ KQU( 2615667650256786818), KQU(10844129913887006352),
+ KQU(13786467861810997820), KQU(14267853002994021570),
+ KQU(13767807302847237439), KQU(16407963253707224617),
+ KQU( 4802498363698583497), KQU( 2523802839317209764),
+ KQU( 3822579397797475589), KQU( 8950320572212130610),
+ KQU( 3745623504978342534), KQU(16092609066068482806),
+ KQU( 9817016950274642398), KQU(10591660660323829098),
+ KQU(11751606650792815920), KQU( 5122873818577122211),
+ KQU(17209553764913936624), KQU( 6249057709284380343),
+ KQU(15088791264695071830), KQU(15344673071709851930),
+ KQU( 4345751415293646084), KQU( 2542865750703067928),
+ KQU(13520525127852368784), KQU(18294188662880997241),
+ KQU( 3871781938044881523), KQU( 2873487268122812184),
+ KQU(15099676759482679005), KQU(15442599127239350490),
+ KQU( 6311893274367710888), KQU( 3286118760484672933),
+ KQU( 4146067961333542189), KQU(13303942567897208770),
+ KQU( 8196013722255630418), KQU( 4437815439340979989),
+ KQU(15433791533450605135), KQU( 4254828956815687049),
+ KQU( 1310903207708286015), KQU(10529182764462398549),
+ KQU(14900231311660638810), KQU( 9727017277104609793),
+ KQU( 1821308310948199033), KQU(11628861435066772084),
+ KQU( 9469019138491546924), KQU( 3145812670532604988),
+ KQU( 9938468915045491919), KQU( 1562447430672662142),
+ KQU(13963995266697989134), KQU( 3356884357625028695),
+ KQU( 4499850304584309747), KQU( 8456825817023658122),
+ KQU(10859039922814285279), KQU( 8099512337972526555),
+ KQU( 348006375109672149), KQU(11919893998241688603),
+ KQU( 1104199577402948826), KQU(16689191854356060289),
+ KQU(10992552041730168078), KQU( 7243733172705465836),
+ KQU( 5668075606180319560), KQU(18182847037333286970),
+ KQU( 4290215357664631322), KQU( 4061414220791828613),
+ KQU(13006291061652989604), KQU( 7140491178917128798),
+ KQU(12703446217663283481), KQU( 5500220597564558267),
+ KQU(10330551509971296358), KQU(15958554768648714492),
+ KQU( 5174555954515360045), KQU( 1731318837687577735),
+ KQU( 3557700801048354857), KQU(13764012341928616198),
+ KQU(13115166194379119043), KQU( 7989321021560255519),
+ KQU( 2103584280905877040), KQU( 9230788662155228488),
+ KQU(16396629323325547654), KQU( 657926409811318051),
+ KQU(15046700264391400727), KQU( 5120132858771880830),
+ KQU( 7934160097989028561), KQU( 6963121488531976245),
+ KQU(17412329602621742089), KQU(15144843053931774092),
+ KQU(17204176651763054532), KQU(13166595387554065870),
+ KQU( 8590377810513960213), KQU( 5834365135373991938),
+ KQU( 7640913007182226243), KQU( 3479394703859418425),
+ KQU(16402784452644521040), KQU( 4993979809687083980),
+ KQU(13254522168097688865), KQU(15643659095244365219),
+ KQU( 5881437660538424982), KQU(11174892200618987379),
+ KQU( 254409966159711077), KQU(17158413043140549909),
+ KQU( 3638048789290376272), KQU( 1376816930299489190),
+ KQU( 4622462095217761923), KQU(15086407973010263515),
+ KQU(13253971772784692238), KQU( 5270549043541649236),
+ KQU(11182714186805411604), KQU(12283846437495577140),
+ KQU( 5297647149908953219), KQU(10047451738316836654),
+ KQU( 4938228100367874746), KQU(12328523025304077923),
+ KQU( 3601049438595312361), KQU( 9313624118352733770),
+ KQU(13322966086117661798), KQU(16660005705644029394),
+ KQU(11337677526988872373), KQU(13869299102574417795),
+ KQU(15642043183045645437), KQU( 3021755569085880019),
+ KQU( 4979741767761188161), KQU(13679979092079279587),
+ KQU( 3344685842861071743), KQU(13947960059899588104),
+ KQU( 305806934293368007), KQU( 5749173929201650029),
+ KQU(11123724852118844098), KQU(15128987688788879802),
+ KQU(15251651211024665009), KQU( 7689925933816577776),
+ KQU(16732804392695859449), KQU(17087345401014078468),
+ KQU(14315108589159048871), KQU( 4820700266619778917),
+ KQU(16709637539357958441), KQU( 4936227875177351374),
+ KQU( 2137907697912987247), KQU(11628565601408395420),
+ KQU( 2333250549241556786), KQU( 5711200379577778637),
+ KQU( 5170680131529031729), KQU(12620392043061335164),
+ KQU( 95363390101096078), KQU( 5487981914081709462),
+ KQU( 1763109823981838620), KQU( 3395861271473224396),
+ KQU( 1300496844282213595), KQU( 6894316212820232902),
+ KQU(10673859651135576674), KQU( 5911839658857903252),
+ KQU(17407110743387299102), KQU( 8257427154623140385),
+ KQU(11389003026741800267), KQU( 4070043211095013717),
+ KQU(11663806997145259025), KQU(15265598950648798210),
+ KQU( 630585789434030934), KQU( 3524446529213587334),
+ KQU( 7186424168495184211), KQU(10806585451386379021),
+ KQU(11120017753500499273), KQU( 1586837651387701301),
+ KQU(17530454400954415544), KQU( 9991670045077880430),
+ KQU( 7550997268990730180), KQU( 8640249196597379304),
+ KQU( 3522203892786893823), KQU(10401116549878854788),
+ KQU(13690285544733124852), KQU( 8295785675455774586),
+ KQU(15535716172155117603), KQU( 3112108583723722511),
+ KQU(17633179955339271113), KQU(18154208056063759375),
+ KQU( 1866409236285815666), KQU(13326075895396412882),
+ KQU( 8756261842948020025), KQU( 6281852999868439131),
+ KQU(15087653361275292858), KQU(10333923911152949397),
+ KQU( 5265567645757408500), KQU(12728041843210352184),
+ KQU( 6347959327507828759), KQU( 154112802625564758),
+ KQU(18235228308679780218), KQU( 3253805274673352418),
+ KQU( 4849171610689031197), KQU(17948529398340432518),
+ KQU(13803510475637409167), KQU(13506570190409883095),
+ KQU(15870801273282960805), KQU( 8451286481299170773),
+ KQU( 9562190620034457541), KQU( 8518905387449138364),
+ KQU(12681306401363385655), KQU( 3788073690559762558),
+ KQU( 5256820289573487769), KQU( 2752021372314875467),
+ KQU( 6354035166862520716), KQU( 4328956378309739069),
+ KQU( 449087441228269600), KQU( 5533508742653090868),
+ KQU( 1260389420404746988), KQU(18175394473289055097),
+ KQU( 1535467109660399420), KQU( 8818894282874061442),
+ KQU(12140873243824811213), KQU(15031386653823014946),
+ KQU( 1286028221456149232), KQU( 6329608889367858784),
+ KQU( 9419654354945132725), KQU( 6094576547061672379),
+ KQU(17706217251847450255), KQU( 1733495073065878126),
+ KQU(16918923754607552663), KQU( 8881949849954945044),
+ KQU(12938977706896313891), KQU(14043628638299793407),
+ KQU(18393874581723718233), KQU( 6886318534846892044),
+ KQU(14577870878038334081), KQU(13541558383439414119),
+ KQU(13570472158807588273), KQU(18300760537910283361),
+ KQU( 818368572800609205), KQU( 1417000585112573219),
+ KQU(12337533143867683655), KQU(12433180994702314480),
+ KQU( 778190005829189083), KQU(13667356216206524711),
+ KQU( 9866149895295225230), KQU(11043240490417111999),
+ KQU( 1123933826541378598), KQU( 6469631933605123610),
+ KQU(14508554074431980040), KQU(13918931242962026714),
+ KQU( 2870785929342348285), KQU(14786362626740736974),
+ KQU(13176680060902695786), KQU( 9591778613541679456),
+ KQU( 9097662885117436706), KQU( 749262234240924947),
+ KQU( 1944844067793307093), KQU( 4339214904577487742),
+ KQU( 8009584152961946551), KQU(16073159501225501777),
+ KQU( 3335870590499306217), KQU(17088312653151202847),
+ KQU( 3108893142681931848), KQU(16636841767202792021),
+ KQU(10423316431118400637), KQU( 8008357368674443506),
+ KQU(11340015231914677875), KQU(17687896501594936090),
+ KQU(15173627921763199958), KQU( 542569482243721959),
+ KQU(15071714982769812975), KQU( 4466624872151386956),
+ KQU( 1901780715602332461), KQU( 9822227742154351098),
+ KQU( 1479332892928648780), KQU( 6981611948382474400),
+ KQU( 7620824924456077376), KQU(14095973329429406782),
+ KQU( 7902744005696185404), KQU(15830577219375036920),
+ KQU(10287076667317764416), KQU(12334872764071724025),
+ KQU( 4419302088133544331), KQU(14455842851266090520),
+ KQU(12488077416504654222), KQU( 7953892017701886766),
+ KQU( 6331484925529519007), KQU( 4902145853785030022),
+ KQU(17010159216096443073), KQU(11945354668653886087),
+ KQU(15112022728645230829), KQU(17363484484522986742),
+ KQU( 4423497825896692887), KQU( 8155489510809067471),
+ KQU( 258966605622576285), KQU( 5462958075742020534),
+ KQU( 6763710214913276228), KQU( 2368935183451109054),
+ KQU(14209506165246453811), KQU( 2646257040978514881),
+ KQU( 3776001911922207672), KQU( 1419304601390147631),
+ KQU(14987366598022458284), KQU( 3977770701065815721),
+ KQU( 730820417451838898), KQU( 3982991703612885327),
+ KQU( 2803544519671388477), KQU(17067667221114424649),
+ KQU( 2922555119737867166), KQU( 1989477584121460932),
+ KQU(15020387605892337354), KQU( 9293277796427533547),
+ KQU(10722181424063557247), KQU(16704542332047511651),
+ KQU( 5008286236142089514), KQU(16174732308747382540),
+ KQU(17597019485798338402), KQU(13081745199110622093),
+ KQU( 8850305883842258115), KQU(12723629125624589005),
+ KQU( 8140566453402805978), KQU(15356684607680935061),
+ KQU(14222190387342648650), KQU(11134610460665975178),
+ KQU( 1259799058620984266), KQU(13281656268025610041),
+ KQU( 298262561068153992), KQU(12277871700239212922),
+ KQU(13911297774719779438), KQU(16556727962761474934),
+ KQU(17903010316654728010), KQU( 9682617699648434744),
+ KQU(14757681836838592850), KQU( 1327242446558524473),
+ KQU(11126645098780572792), KQU( 1883602329313221774),
+ KQU( 2543897783922776873), KQU(15029168513767772842),
+ KQU(12710270651039129878), KQU(16118202956069604504),
+ KQU(15010759372168680524), KQU( 2296827082251923948),
+ KQU(10793729742623518101), KQU(13829764151845413046),
+ KQU(17769301223184451213), KQU( 3118268169210783372),
+ KQU(17626204544105123127), KQU( 7416718488974352644),
+ KQU(10450751996212925994), KQU( 9352529519128770586),
+ KQU( 259347569641110140), KQU( 8048588892269692697),
+ KQU( 1774414152306494058), KQU(10669548347214355622),
+ KQU(13061992253816795081), KQU(18432677803063861659),
+ KQU( 8879191055593984333), KQU(12433753195199268041),
+ KQU(14919392415439730602), KQU( 6612848378595332963),
+ KQU( 6320986812036143628), KQU(10465592420226092859),
+ KQU( 4196009278962570808), KQU( 3747816564473572224),
+ KQU(17941203486133732898), KQU( 2350310037040505198),
+ KQU( 5811779859134370113), KQU(10492109599506195126),
+ KQU( 7699650690179541274), KQU( 1954338494306022961),
+ KQU(14095816969027231152), KQU( 5841346919964852061),
+ KQU(14945969510148214735), KQU( 3680200305887550992),
+ KQU( 6218047466131695792), KQU( 8242165745175775096),
+ KQU(11021371934053307357), KQU( 1265099502753169797),
+ KQU( 4644347436111321718), KQU( 3609296916782832859),
+ KQU( 8109807992218521571), KQU(18387884215648662020),
+ KQU(14656324896296392902), KQU(17386819091238216751),
+ KQU(17788300878582317152), KQU( 7919446259742399591),
+ KQU( 4466613134576358004), KQU(12928181023667938509),
+ KQU(13147446154454932030), KQU(16552129038252734620),
+ KQU( 8395299403738822450), KQU(11313817655275361164),
+ KQU( 434258809499511718), KQU( 2074882104954788676),
+ KQU( 7929892178759395518), KQU( 9006461629105745388),
+ KQU( 5176475650000323086), KQU(11128357033468341069),
+ KQU(12026158851559118955), KQU(14699716249471156500),
+ KQU( 448982497120206757), KQU( 4156475356685519900),
+ KQU( 6063816103417215727), KQU(10073289387954971479),
+ KQU( 8174466846138590962), KQU( 2675777452363449006),
+ KQU( 9090685420572474281), KQU( 6659652652765562060),
+ KQU(12923120304018106621), KQU(11117480560334526775),
+ KQU( 937910473424587511), KQU( 1838692113502346645),
+ KQU(11133914074648726180), KQU( 7922600945143884053),
+ KQU(13435287702700959550), KQU( 5287964921251123332),
+ KQU(11354875374575318947), KQU(17955724760748238133),
+ KQU(13728617396297106512), KQU( 4107449660118101255),
+ KQU( 1210269794886589623), KQU(11408687205733456282),
+ KQU( 4538354710392677887), KQU(13566803319341319267),
+ KQU(17870798107734050771), KQU( 3354318982568089135),
+ KQU( 9034450839405133651), KQU(13087431795753424314),
+ KQU( 950333102820688239), KQU( 1968360654535604116),
+ KQU(16840551645563314995), KQU( 8867501803892924995),
+ KQU(11395388644490626845), KQU( 1529815836300732204),
+ KQU(13330848522996608842), KQU( 1813432878817504265),
+ KQU( 2336867432693429560), KQU(15192805445973385902),
+ KQU( 2528593071076407877), KQU( 128459777936689248),
+ KQU( 9976345382867214866), KQU( 6208885766767996043),
+ KQU(14982349522273141706), KQU( 3099654362410737822),
+ KQU(13776700761947297661), KQU( 8806185470684925550),
+ KQU( 8151717890410585321), KQU( 640860591588072925),
+ KQU(14592096303937307465), KQU( 9056472419613564846),
+ KQU(14861544647742266352), KQU(12703771500398470216),
+ KQU( 3142372800384138465), KQU( 6201105606917248196),
+ KQU(18337516409359270184), KQU(15042268695665115339),
+ KQU(15188246541383283846), KQU(12800028693090114519),
+ KQU( 5992859621101493472), KQU(18278043971816803521),
+ KQU( 9002773075219424560), KQU( 7325707116943598353),
+ KQU( 7930571931248040822), KQU( 5645275869617023448),
+ KQU( 7266107455295958487), KQU( 4363664528273524411),
+ KQU(14313875763787479809), KQU(17059695613553486802),
+ KQU( 9247761425889940932), KQU(13704726459237593128),
+ KQU( 2701312427328909832), KQU(17235532008287243115),
+ KQU(14093147761491729538), KQU( 6247352273768386516),
+ KQU( 8268710048153268415), KQU( 7985295214477182083),
+ KQU(15624495190888896807), KQU( 3772753430045262788),
+ KQU( 9133991620474991698), KQU( 5665791943316256028),
+ KQU( 7551996832462193473), KQU(13163729206798953877),
+ KQU( 9263532074153846374), KQU( 1015460703698618353),
+ KQU(17929874696989519390), KQU(18257884721466153847),
+ KQU(16271867543011222991), KQU( 3905971519021791941),
+ KQU(16814488397137052085), KQU( 1321197685504621613),
+ KQU( 2870359191894002181), KQU(14317282970323395450),
+ KQU(13663920845511074366), KQU( 2052463995796539594),
+ KQU(14126345686431444337), KQU( 1727572121947022534),
+ KQU(17793552254485594241), KQU( 6738857418849205750),
+ KQU( 1282987123157442952), KQU(16655480021581159251),
+ KQU( 6784587032080183866), KQU(14726758805359965162),
+ KQU( 7577995933961987349), KQU(12539609320311114036),
+ KQU(10789773033385439494), KQU( 8517001497411158227),
+ KQU(10075543932136339710), KQU(14838152340938811081),
+ KQU( 9560840631794044194), KQU(17445736541454117475),
+ KQU(10633026464336393186), KQU(15705729708242246293),
+ KQU( 1117517596891411098), KQU( 4305657943415886942),
+ KQU( 4948856840533979263), KQU(16071681989041789593),
+ KQU(13723031429272486527), KQU( 7639567622306509462),
+ KQU(12670424537483090390), KQU( 9715223453097197134),
+ KQU( 5457173389992686394), KQU( 289857129276135145),
+ KQU(17048610270521972512), KQU( 692768013309835485),
+ KQU(14823232360546632057), KQU(18218002361317895936),
+ KQU( 3281724260212650204), KQU(16453957266549513795),
+ KQU( 8592711109774511881), KQU( 929825123473369579),
+ KQU(15966784769764367791), KQU( 9627344291450607588),
+ KQU(10849555504977813287), KQU( 9234566913936339275),
+ KQU( 6413807690366911210), KQU(10862389016184219267),
+ KQU(13842504799335374048), KQU( 1531994113376881174),
+ KQU( 2081314867544364459), KQU(16430628791616959932),
+ KQU( 8314714038654394368), KQU( 9155473892098431813),
+ KQU(12577843786670475704), KQU( 4399161106452401017),
+ KQU( 1668083091682623186), KQU( 1741383777203714216),
+ KQU( 2162597285417794374), KQU(15841980159165218736),
+ KQU( 1971354603551467079), KQU( 1206714764913205968),
+ KQU( 4790860439591272330), KQU(14699375615594055799),
+ KQU( 8374423871657449988), KQU(10950685736472937738),
+ KQU( 697344331343267176), KQU(10084998763118059810),
+ KQU(12897369539795983124), KQU(12351260292144383605),
+ KQU( 1268810970176811234), KQU( 7406287800414582768),
+ KQU( 516169557043807831), KQU( 5077568278710520380),
+ KQU( 3828791738309039304), KQU( 7721974069946943610),
+ KQU( 3534670260981096460), KQU( 4865792189600584891),
+ KQU(16892578493734337298), KQU( 9161499464278042590),
+ KQU(11976149624067055931), KQU(13219479887277343990),
+ KQU(14161556738111500680), KQU(14670715255011223056),
+ KQU( 4671205678403576558), KQU(12633022931454259781),
+ KQU(14821376219869187646), KQU( 751181776484317028),
+ KQU( 2192211308839047070), KQU(11787306362361245189),
+ KQU(10672375120744095707), KQU( 4601972328345244467),
+ KQU(15457217788831125879), KQU( 8464345256775460809),
+ KQU(10191938789487159478), KQU( 6184348739615197613),
+ KQU(11425436778806882100), KQU( 2739227089124319793),
+ KQU( 461464518456000551), KQU( 4689850170029177442),
+ KQU( 6120307814374078625), KQU(11153579230681708671),
+ KQU( 7891721473905347926), KQU(10281646937824872400),
+ KQU( 3026099648191332248), KQU( 8666750296953273818),
+ KQU(14978499698844363232), KQU(13303395102890132065),
+ KQU( 8182358205292864080), KQU(10560547713972971291),
+ KQU(11981635489418959093), KQU( 3134621354935288409),
+ KQU(11580681977404383968), KQU(14205530317404088650),
+ KQU( 5997789011854923157), KQU(13659151593432238041),
+ KQU(11664332114338865086), KQU( 7490351383220929386),
+ KQU( 7189290499881530378), KQU(15039262734271020220),
+ KQU( 2057217285976980055), KQU( 555570804905355739),
+ KQU(11235311968348555110), KQU(13824557146269603217),
+ KQU(16906788840653099693), KQU( 7222878245455661677),
+ KQU( 5245139444332423756), KQU( 4723748462805674292),
+ KQU(12216509815698568612), KQU(17402362976648951187),
+ KQU(17389614836810366768), KQU( 4880936484146667711),
+ KQU( 9085007839292639880), KQU(13837353458498535449),
+ KQU(11914419854360366677), KQU(16595890135313864103),
+ KQU( 6313969847197627222), KQU(18296909792163910431),
+ KQU(10041780113382084042), KQU( 2499478551172884794),
+ KQU(11057894246241189489), KQU( 9742243032389068555),
+ KQU(12838934582673196228), KQU(13437023235248490367),
+ KQU(13372420669446163240), KQU( 6752564244716909224),
+ KQU( 7157333073400313737), KQU(12230281516370654308),
+ KQU( 1182884552219419117), KQU( 2955125381312499218),
+ KQU(10308827097079443249), KQU( 1337648572986534958),
+ KQU(16378788590020343939), KQU( 108619126514420935),
+ KQU( 3990981009621629188), KQU( 5460953070230946410),
+ KQU( 9703328329366531883), KQU(13166631489188077236),
+ KQU( 1104768831213675170), KQU( 3447930458553877908),
+ KQU( 8067172487769945676), KQU( 5445802098190775347),
+ KQU( 3244840981648973873), KQU(17314668322981950060),
+ KQU( 5006812527827763807), KQU(18158695070225526260),
+ KQU( 2824536478852417853), KQU(13974775809127519886),
+ KQU( 9814362769074067392), KQU(17276205156374862128),
+ KQU(11361680725379306967), KQU( 3422581970382012542),
+ KQU(11003189603753241266), KQU(11194292945277862261),
+ KQU( 6839623313908521348), KQU(11935326462707324634),
+ KQU( 1611456788685878444), KQU(13112620989475558907),
+ KQU( 517659108904450427), KQU(13558114318574407624),
+ KQU(15699089742731633077), KQU( 4988979278862685458),
+ KQU( 8111373583056521297), KQU( 3891258746615399627),
+ KQU( 8137298251469718086), KQU(12748663295624701649),
+ KQU( 4389835683495292062), KQU( 5775217872128831729),
+ KQU( 9462091896405534927), KQU( 8498124108820263989),
+ KQU( 8059131278842839525), KQU(10503167994254090892),
+ KQU(11613153541070396656), KQU(18069248738504647790),
+ KQU( 570657419109768508), KQU( 3950574167771159665),
+ KQU( 5514655599604313077), KQU( 2908460854428484165),
+ KQU(10777722615935663114), KQU(12007363304839279486),
+ KQU( 9800646187569484767), KQU( 8795423564889864287),
+ KQU(14257396680131028419), KQU( 6405465117315096498),
+ KQU( 7939411072208774878), KQU(17577572378528990006),
+ KQU(14785873806715994850), KQU(16770572680854747390),
+ KQU(18127549474419396481), KQU(11637013449455757750),
+ KQU(14371851933996761086), KQU( 3601181063650110280),
+ KQU( 4126442845019316144), KQU(10198287239244320669),
+ KQU(18000169628555379659), KQU(18392482400739978269),
+ KQU( 6219919037686919957), KQU( 3610085377719446052),
+ KQU( 2513925039981776336), KQU(16679413537926716955),
+ KQU(12903302131714909434), KQU( 5581145789762985009),
+ KQU(12325955044293303233), KQU(17216111180742141204),
+ KQU( 6321919595276545740), KQU( 3507521147216174501),
+ KQU( 9659194593319481840), KQU(11473976005975358326),
+ KQU(14742730101435987026), KQU( 492845897709954780),
+ KQU(16976371186162599676), KQU(17712703422837648655),
+ KQU( 9881254778587061697), KQU( 8413223156302299551),
+ KQU( 1563841828254089168), KQU( 9996032758786671975),
+ KQU( 138877700583772667), KQU(13003043368574995989),
+ KQU( 4390573668650456587), KQU( 8610287390568126755),
+ KQU(15126904974266642199), KQU( 6703637238986057662),
+ KQU( 2873075592956810157), KQU( 6035080933946049418),
+ KQU(13382846581202353014), KQU( 7303971031814642463),
+ KQU(18418024405307444267), KQU( 5847096731675404647),
+ KQU( 4035880699639842500), KQU(11525348625112218478),
+ KQU( 3041162365459574102), KQU( 2604734487727986558),
+ KQU(15526341771636983145), KQU(14556052310697370254),
+ KQU(12997787077930808155), KQU( 9601806501755554499),
+ KQU(11349677952521423389), KQU(14956777807644899350),
+ KQU(16559736957742852721), KQU(12360828274778140726),
+ KQU( 6685373272009662513), KQU(16932258748055324130),
+ KQU(15918051131954158508), KQU( 1692312913140790144),
+ KQU( 546653826801637367), KQU( 5341587076045986652),
+ KQU(14975057236342585662), KQU(12374976357340622412),
+ KQU(10328833995181940552), KQU(12831807101710443149),
+ KQU(10548514914382545716), KQU( 2217806727199715993),
+ KQU(12627067369242845138), KQU( 4598965364035438158),
+ KQU( 150923352751318171), KQU(14274109544442257283),
+ KQU( 4696661475093863031), KQU( 1505764114384654516),
+ KQU(10699185831891495147), KQU( 2392353847713620519),
+ KQU( 3652870166711788383), KQU( 8640653276221911108),
+ KQU( 3894077592275889704), KQU( 4918592872135964845),
+ KQU(16379121273281400789), KQU(12058465483591683656),
+ KQU(11250106829302924945), KQU( 1147537556296983005),
+ KQU( 6376342756004613268), KQU(14967128191709280506),
+ KQU(18007449949790627628), KQU( 9497178279316537841),
+ KQU( 7920174844809394893), KQU(10037752595255719907),
+ KQU(15875342784985217697), KQU(15311615921712850696),
+ KQU( 9552902652110992950), KQU(14054979450099721140),
+ KQU( 5998709773566417349), KQU(18027910339276320187),
+ KQU( 8223099053868585554), KQU( 7842270354824999767),
+ KQU( 4896315688770080292), KQU(12969320296569787895),
+ KQU( 2674321489185759961), KQU( 4053615936864718439),
+ KQU(11349775270588617578), KQU( 4743019256284553975),
+ KQU( 5602100217469723769), KQU(14398995691411527813),
+ KQU( 7412170493796825470), KQU( 836262406131744846),
+ KQU( 8231086633845153022), KQU( 5161377920438552287),
+ KQU( 8828731196169924949), KQU(16211142246465502680),
+ KQU( 3307990879253687818), KQU( 5193405406899782022),
+ KQU( 8510842117467566693), KQU( 6070955181022405365),
+ KQU(14482950231361409799), KQU(12585159371331138077),
+ KQU( 3511537678933588148), KQU( 2041849474531116417),
+ KQU(10944936685095345792), KQU(18303116923079107729),
+ KQU( 2720566371239725320), KQU( 4958672473562397622),
+ KQU( 3032326668253243412), KQU(13689418691726908338),
+ KQU( 1895205511728843996), KQU( 8146303515271990527),
+ KQU(16507343500056113480), KQU( 473996939105902919),
+ KQU( 9897686885246881481), KQU(14606433762712790575),
+ KQU( 6732796251605566368), KQU( 1399778120855368916),
+ KQU( 935023885182833777), KQU(16066282816186753477),
+ KQU( 7291270991820612055), KQU(17530230393129853844),
+ KQU(10223493623477451366), KQU(15841725630495676683),
+ KQU(17379567246435515824), KQU( 8588251429375561971),
+ KQU(18339511210887206423), KQU(17349587430725976100),
+ KQU(12244876521394838088), KQU( 6382187714147161259),
+ KQU(12335807181848950831), KQU(16948885622305460665),
+ KQU(13755097796371520506), KQU(14806740373324947801),
+ KQU( 4828699633859287703), KQU( 8209879281452301604),
+ KQU(12435716669553736437), KQU(13970976859588452131),
+ KQU( 6233960842566773148), KQU(12507096267900505759),
+ KQU( 1198713114381279421), KQU(14989862731124149015),
+ KQU(15932189508707978949), KQU( 2526406641432708722),
+ KQU( 29187427817271982), KQU( 1499802773054556353),
+ KQU(10816638187021897173), KQU( 5436139270839738132),
+ KQU( 6659882287036010082), KQU( 2154048955317173697),
+ KQU(10887317019333757642), KQU(16281091802634424955),
+ KQU(10754549879915384901), KQU(10760611745769249815),
+ KQU( 2161505946972504002), KQU( 5243132808986265107),
+ KQU(10129852179873415416), KQU( 710339480008649081),
+ KQU( 7802129453068808528), KQU(17967213567178907213),
+ KQU(15730859124668605599), KQU(13058356168962376502),
+ KQU( 3701224985413645909), KQU(14464065869149109264),
+ KQU( 9959272418844311646), KQU(10157426099515958752),
+ KQU(14013736814538268528), KQU(17797456992065653951),
+ KQU(17418878140257344806), KQU(15457429073540561521),
+ KQU( 2184426881360949378), KQU( 2062193041154712416),
+ KQU( 8553463347406931661), KQU( 4913057625202871854),
+ KQU( 2668943682126618425), KQU(17064444737891172288),
+ KQU( 4997115903913298637), KQU(12019402608892327416),
+ KQU(17603584559765897352), KQU(11367529582073647975),
+ KQU( 8211476043518436050), KQU( 8676849804070323674),
+ KQU(18431829230394475730), KQU(10490177861361247904),
+ KQU( 9508720602025651349), KQU( 7409627448555722700),
+ KQU( 5804047018862729008), KQU(11943858176893142594),
+ KQU(11908095418933847092), KQU( 5415449345715887652),
+ KQU( 1554022699166156407), KQU( 9073322106406017161),
+ KQU( 7080630967969047082), KQU(18049736940860732943),
+ KQU(12748714242594196794), KQU( 1226992415735156741),
+ KQU(17900981019609531193), KQU(11720739744008710999),
+ KQU( 3006400683394775434), KQU(11347974011751996028),
+ KQU( 3316999628257954608), KQU( 8384484563557639101),
+ KQU(18117794685961729767), KQU( 1900145025596618194),
+ KQU(17459527840632892676), KQU( 5634784101865710994),
+ KQU( 7918619300292897158), KQU( 3146577625026301350),
+ KQU( 9955212856499068767), KQU( 1873995843681746975),
+ KQU( 1561487759967972194), KQU( 8322718804375878474),
+ KQU(11300284215327028366), KQU( 4667391032508998982),
+ KQU( 9820104494306625580), KQU(17922397968599970610),
+ KQU( 1784690461886786712), KQU(14940365084341346821),
+ KQU( 5348719575594186181), KQU(10720419084507855261),
+ KQU(14210394354145143274), KQU( 2426468692164000131),
+ KQU(16271062114607059202), KQU(14851904092357070247),
+ KQU( 6524493015693121897), KQU( 9825473835127138531),
+ KQU(14222500616268569578), KQU(15521484052007487468),
+ KQU(14462579404124614699), KQU(11012375590820665520),
+ KQU(11625327350536084927), KQU(14452017765243785417),
+ KQU( 9989342263518766305), KQU( 3640105471101803790),
+ KQU( 4749866455897513242), KQU(13963064946736312044),
+ KQU(10007416591973223791), KQU(18314132234717431115),
+ KQU( 3286596588617483450), KQU( 7726163455370818765),
+ KQU( 7575454721115379328), KQU( 5308331576437663422),
+ KQU(18288821894903530934), KQU( 8028405805410554106),
+ KQU(15744019832103296628), KQU( 149765559630932100),
+ KQU( 6137705557200071977), KQU(14513416315434803615),
+ KQU(11665702820128984473), KQU( 218926670505601386),
+ KQU( 6868675028717769519), KQU(15282016569441512302),
+ KQU( 5707000497782960236), KQU( 6671120586555079567),
+ KQU( 2194098052618985448), KQU(16849577895477330978),
+ KQU(12957148471017466283), KQU( 1997805535404859393),
+ KQU( 1180721060263860490), KQU(13206391310193756958),
+ KQU(12980208674461861797), KQU( 3825967775058875366),
+ KQU(17543433670782042631), KQU( 1518339070120322730),
+ KQU(16344584340890991669), KQU( 2611327165318529819),
+ KQU(11265022723283422529), KQU( 4001552800373196817),
+ KQU(14509595890079346161), KQU( 3528717165416234562),
+ KQU(18153222571501914072), KQU( 9387182977209744425),
+ KQU(10064342315985580021), KQU(11373678413215253977),
+ KQU( 2308457853228798099), KQU( 9729042942839545302),
+ KQU( 7833785471140127746), KQU( 6351049900319844436),
+ KQU(14454610627133496067), KQU(12533175683634819111),
+ KQU(15570163926716513029), KQU(13356980519185762498)
+};
+
+TEST_BEGIN(test_gen_rand_32) {
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ expect_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_gen_rand(1234);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(1234);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ expect_u32_eq(array32[i], init_gen_rand_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ expect_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ expect_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_32) {
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ expect_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_by_array(ini, 4);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 4);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ expect_u32_eq(array32[i], init_by_array_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ expect_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ expect_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_gen_rand_64) {
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ sfmt_t *ctx;
+
+ expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_gen_rand(4321);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(4321);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ expect_u64_eq(array64[i], init_gen_rand_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ expect_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ expect_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_64) {
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ uint32_t ini[] = {5, 4, 3, 2, 1};
+ sfmt_t *ctx;
+
+ expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_by_array(ini, 5);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 5);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ expect_u64_eq(array64[i], init_by_array_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ expect_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ expect_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_gen_rand_32,
+ test_by_array_32,
+ test_gen_rand_64,
+ test_by_array_64);
+}
diff --git a/deps/jemalloc/test/unit/a0.c b/deps/jemalloc/test/unit/a0.c
new file mode 100644
index 0000000..c1be79a
--- /dev/null
+++ b/deps/jemalloc/test/unit/a0.c
@@ -0,0 +1,16 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_a0) {
+ void *p;
+
+ p = a0malloc(1);
+ expect_ptr_not_null(p, "Unexpected a0malloc() error");
+ a0dalloc(p);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_malloc_init(
+ test_a0);
+}
diff --git a/deps/jemalloc/test/unit/arena_decay.c b/deps/jemalloc/test/unit/arena_decay.c
new file mode 100644
index 0000000..e991f4d
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_decay.c
@@ -0,0 +1,436 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+
+#include "jemalloc/internal/ticker.h"
+
+static nstime_monotonic_t *nstime_monotonic_orig;
+static nstime_update_t *nstime_update_orig;
+
+static unsigned nupdates_mock;
+static nstime_t time_mock;
+static bool monotonic_mock;
+
+static bool
+nstime_monotonic_mock(void) {
+ return monotonic_mock;
+}
+
+static void
+nstime_update_mock(nstime_t *time) {
+ nupdates_mock++;
+ if (monotonic_mock) {
+ nstime_copy(time, &time_mock);
+ }
+}
+
+TEST_BEGIN(test_decay_ticks) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+
+ ticker_geom_t *decay_ticker;
+ unsigned tick0, tick1, arena_ind;
+ size_t sz, large0;
+ void *p;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ /* Set up a manually managed arena for test. */
+ arena_ind = do_arena_create(0, 0);
+
+ /* Migrate to the new arena, and get the ticker. */
+ unsigned old_arena_ind;
+ size_t sz_arena_ind = sizeof(old_arena_ind);
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
+ &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+ decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
+ expect_ptr_not_null(decay_ticker,
+ "Unexpected failure getting decay ticker");
+
+ /*
+ * Test the standard APIs using a large size class, since we can't
+ * control tcache interactions for small size classes (except by
+ * completely disabling tcache for the entire test program).
+ */
+
+ /* malloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = malloc(large0);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+ /* free(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ free(p);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+ /* calloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = calloc(1, large0);
+ expect_ptr_not_null(p, "Unexpected calloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+ free(p);
+
+ /* posix_memalign(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
+ "Unexpected posix_memalign() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during posix_memalign()");
+ free(p);
+
+ /* aligned_alloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = aligned_alloc(sizeof(size_t), large0);
+ expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during aligned_alloc()");
+ free(p);
+
+ /* realloc(). */
+ /* Allocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = realloc(NULL, large0);
+ expect_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Reallocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = realloc(p, large0);
+ expect_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Deallocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ realloc(p, 0);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+ /*
+ * Test the *allocx() APIs using large and small size classes, with
+ * tcache explicitly disabled.
+ */
+ {
+ unsigned i;
+ size_t allocx_sizes[2];
+ allocx_sizes[0] = large0;
+ allocx_sizes[1] = 1;
+
+ for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
+ sz = allocx_sizes[i];
+
+ /* mallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during mallocx() (sz=%zu)",
+ sz);
+ /* rallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during rallocx() (sz=%zu)",
+ sz);
+ /* xallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during xallocx() (sz=%zu)",
+ sz);
+ /* dallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ dallocx(p, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during dallocx() (sz=%zu)",
+ sz);
+ /* sdallocx(). */
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick0 = ticker_geom_read(decay_ticker);
+ sdallocx(p, sz, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during sdallocx() "
+ "(sz=%zu)", sz);
+ }
+ }
+
+ /*
+ * Test tcache fill/flush interactions for large and small size classes,
+ * using an explicit tcache.
+ */
+ unsigned tcache_ind, i;
+ size_t tcache_sizes[2];
+ tcache_sizes[0] = large0;
+ tcache_sizes[1] = 1;
+
+ size_t tcache_max, sz_tcache_max;
+ sz_tcache_max = sizeof(tcache_max);
+ expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+ &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
+ sz = tcache_sizes[i];
+
+ /* tcache fill. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache fill "
+ "(sz=%zu)", sz);
+ /* tcache flush. */
+ dallocx(p, MALLOCX_TCACHE(tcache_ind));
+ tick0 = ticker_geom_read(decay_ticker);
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL,
+ (void *)&tcache_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl failure");
+ tick1 = ticker_geom_read(decay_ticker);
+
+ /* Will only tick if it's in tcache. */
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache flush (sz=%zu)", sz);
+ }
+}
+TEST_END
+
+static void
+decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
+ uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
+#define NINTERVALS 101
+ nstime_t time, update_interval, decay_ms, deadline;
+
+ nstime_init_update(&time);
+
+ nstime_init2(&decay_ms, dt, 0);
+ nstime_copy(&deadline, &time);
+ nstime_add(&deadline, &decay_ms);
+
+ nstime_init2(&update_interval, dt, 0);
+ nstime_idivide(&update_interval, NINTERVALS);
+
+ /*
+ * Keep q's slab from being deallocated during the looping below. If a
+ * cached slab were to repeatedly come and go during looping, it could
+ * prevent the decay backlog from ever becoming empty.
+ */
+ void *p = do_mallocx(1, flags);
+ uint64_t dirty_npurge1, muzzy_npurge1;
+ do {
+ for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
+ i++) {
+ void *q = do_mallocx(1, flags);
+ dallocx(q, flags);
+ }
+ dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
+ muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
+
+ nstime_add(&time_mock, &update_interval);
+ nstime_update(&time);
+ } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
+ dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
+ !terminate_asap));
+ dallocx(p, flags);
+
+ if (config_stats) {
+ expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
+ muzzy_npurge0, "Expected purging to occur");
+ }
+#undef NINTERVALS
+}
+
+TEST_BEGIN(test_decay_ticker) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+#define NPS 2048
+ ssize_t ddt = opt_dirty_decay_ms;
+ ssize_t mdt = opt_muzzy_decay_ms;
+ unsigned arena_ind = do_arena_create(ddt, mdt);
+ int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+
+ /*
+ * Allocate a bunch of large objects, pause the clock, deallocate every
+ * other object (to fragment virtual memory), restore the clock, then
+ * [md]allocx() in a tight loop while advancing time rapidly to verify
+ * the ticker triggers purging.
+ */
+ size_t large;
+ size_t sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ do_purge(arena_ind);
+ uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
+ uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
+
+ for (unsigned i = 0; i < NPS; i++) {
+ ps[i] = do_mallocx(large, flags);
+ }
+
+ nupdates_mock = 0;
+ nstime_init_update(&time_mock);
+ monotonic_mock = true;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (unsigned i = 0; i < NPS; i += 2) {
+ dallocx(ps[i], flags);
+ unsigned nupdates0 = nupdates_mock;
+ do_decay(arena_ind);
+ expect_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
+ muzzy_npurge0, true);
+ decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
+ muzzy_npurge0, false);
+
+ do_arena_destroy(arena_ind);
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_nonmonotonic) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+#define NPS (SMOOTHSTEP_NSTEPS + 1)
+ int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ uint64_t npurge0 = 0;
+ uint64_t npurge1 = 0;
+ size_t sz, large0;
+ unsigned i, nupdates0;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge0 = get_arena_npurge(0);
+
+ nupdates_mock = 0;
+ nstime_init_update(&time_mock);
+ monotonic_mock = false;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (i = 0; i < NPS; i++) {
+ ps[i] = mallocx(large0, flags);
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NPS; i++) {
+ dallocx(ps[i], flags);
+ nupdates0 = nupdates_mock;
+ expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.decay failure");
+ expect_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge1 = get_arena_npurge(0);
+
+ if (config_stats) {
+ expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
+ }
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_now) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ /* Verify that dirty/muzzy pages never linger after deallocation. */
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ size_t size = sizes[i];
+ generate_dirty(arena_ind, size);
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0,
+ "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
+ "Unexpected muzzy pages");
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+TEST_BEGIN(test_decay_never) {
+ test_skip_if(is_background_thread_enabled() || !config_stats);
+ test_skip_if(opt_hpa);
+
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ void *ptrs[sizeof(sizes)/sizeof(size_t)];
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ ptrs[i] = do_mallocx(sizes[i], flags);
+ }
+ /* Verify that each deallocation generates additional dirty pages. */
+ size_t pdirty_prev = get_arena_pdirty(arena_ind);
+ size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
+ expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
+ expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ dallocx(ptrs[i], flags);
+ size_t pdirty = get_arena_pdirty(arena_ind);
+ size_t pmuzzy = get_arena_pmuzzy(arena_ind);
+ expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
+ pdirty_prev, "Expected dirty pages to increase.");
+ expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
+ pdirty_prev = pdirty;
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_decay_ticks,
+ test_decay_ticker,
+ test_decay_nonmonotonic,
+ test_decay_now,
+ test_decay_never);
+}
diff --git a/deps/jemalloc/test/unit/arena_decay.sh b/deps/jemalloc/test/unit/arena_decay.sh
new file mode 100644
index 0000000..52f1b20
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_decay.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,tcache_max:1024"
diff --git a/deps/jemalloc/test/unit/arena_reset.c b/deps/jemalloc/test/unit/arena_reset.c
new file mode 100644
index 0000000..8ef0786
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset.c
@@ -0,0 +1,361 @@
+#ifndef ARENA_RESET_PROF_C_
+#include "test/jemalloc_test.h"
+#endif
+
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/rtree.h"
+
+#include "test/extent_hooks.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nsmall(void) {
+ return get_nsizes_impl("arenas.nbins");
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_small_size(size_t ind) {
+ return get_size_impl("arenas.bin.0.size", ind);
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/* Like ivsalloc(), but safe to call on discarded allocations. */
+static size_t
+vsalloc(tsdn_t *tsdn, const void *ptr) {
+ emap_full_alloc_ctx_t full_alloc_ctx;
+ bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+ ptr, &full_alloc_ctx);
+ if (missing) {
+ return 0;
+ }
+
+ if (full_alloc_ctx.edata == NULL) {
+ return 0;
+ }
+ if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
+ return 0;
+ }
+
+ if (full_alloc_ctx.szind == SC_NSIZES) {
+ return 0;
+ }
+
+ return sz_index2size(full_alloc_ctx.szind);
+}
+
+static unsigned
+do_arena_create(extent_hooks_t *h) {
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
+ "Unexpected mallctl() failure");
+ return arena_ind;
+}
+
+static void
+do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
+#define NLARGE 32
+ unsigned nsmall, nlarge, i;
+ size_t sz;
+ int flags;
+ tsdn_t *tsdn;
+
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ nsmall = get_nsmall();
+ nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
+ *nptrs = nsmall + nlarge;
+ *ptrs = (void **)malloc(*nptrs * sizeof(void *));
+ expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");
+
+ /* Allocate objects with a wide range of sizes. */
+ for (i = 0; i < nsmall; i++) {
+ sz = get_small_size(i);
+ (*ptrs)[i] = mallocx(sz, flags);
+ expect_ptr_not_null((*ptrs)[i],
+ "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+ }
+ for (i = 0; i < nlarge; i++) {
+ sz = get_large_size(i);
+ (*ptrs)[nsmall + i] = mallocx(sz, flags);
+ expect_ptr_not_null((*ptrs)[nsmall + i],
+ "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+ }
+
+ tsdn = tsdn_fetch();
+
+ /* Verify allocations. */
+ for (i = 0; i < *nptrs; i++) {
+ expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
+ "Allocation should have queryable size");
+ }
+}
+
+static void
+do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
+ tsdn_t *tsdn;
+ unsigned i;
+
+ tsdn = tsdn_fetch();
+
+ if (have_background_thread) {
+ malloc_mutex_lock(tsdn,
+ &background_thread_info_get(arena_ind)->mtx);
+ }
+ /* Verify allocations no longer exist. */
+ for (i = 0; i < nptrs; i++) {
+ expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
+ "Allocation should no longer exist");
+ }
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsdn,
+ &background_thread_info_get(arena_ind)->mtx);
+ }
+
+ free(ptrs);
+}
+
+static void
+do_arena_reset_destroy(const char *name, unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static void
+do_arena_reset(unsigned arena_ind) {
+ do_arena_reset_destroy("arena.0.reset", arena_ind);
+}
+
+static void
+do_arena_destroy(unsigned arena_ind) {
+ do_arena_reset_destroy("arena.0.destroy", arena_ind);
+}
+
+TEST_BEGIN(test_arena_reset) {
+ unsigned arena_ind;
+ void **ptrs;
+ unsigned nptrs;
+
+ arena_ind = do_arena_create(NULL);
+ do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+ do_arena_reset(arena_ind);
+ do_arena_reset_post(ptrs, nptrs, arena_ind);
+}
+TEST_END
+
+static bool
+arena_i_initialized(unsigned arena_ind, bool refresh) {
+ bool initialized;
+ size_t mib[3];
+ size_t miblen, sz;
+
+ if (refresh) {
+ uint64_t epoch = 1;
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)), 0, "Unexpected mallctl() failure");
+ }
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ sz = sizeof(initialized);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
+ 0), 0, "Unexpected mallctlbymib() failure");
+
+ return initialized;
+}
+
+TEST_BEGIN(test_arena_destroy_initial) {
+ expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ "Destroyed arena stats should not be initialized");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_destroy_hooks_default) {
+ unsigned arena_ind, arena_ind_another, arena_ind_prev;
+ void **ptrs;
+ unsigned nptrs;
+
+ arena_ind = do_arena_create(NULL);
+ do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+
+ expect_false(arena_i_initialized(arena_ind, false),
+ "Arena stats should not be initialized");
+ expect_true(arena_i_initialized(arena_ind, true),
+ "Arena stats should be initialized");
+
+ /*
+ * Create another arena before destroying one, to better verify arena
+ * index reuse.
+ */
+ arena_ind_another = do_arena_create(NULL);
+
+ do_arena_destroy(arena_ind);
+
+ expect_false(arena_i_initialized(arena_ind, true),
+ "Arena stats should not be initialized");
+ expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ "Destroyed arena stats should be initialized");
+
+ do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+ arena_ind_prev = arena_ind;
+ arena_ind = do_arena_create(NULL);
+ do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+ expect_u_eq(arena_ind, arena_ind_prev,
+ "Arena index should have been recycled");
+ do_arena_destroy(arena_ind);
+ do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+ do_arena_destroy(arena_ind_another);
+
+ /* Try arena.create with custom hooks. */
+ size_t sz = sizeof(extent_hooks_t *);
+ extent_hooks_t *a0_default_hooks;
+ expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
+ &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
+
+ /* Default impl; but wrapped as "customized". */
+ extent_hooks_t new_hooks = *a0_default_hooks;
+ extent_hooks_t *hook = &new_hooks;
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)&hook, sizeof(void *)), 0,
+ "Unexpected mallctl() failure");
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+/*
+ * Actually unmap extents, regardless of opt_retain, so that attempts to access
+ * a destroyed arena's memory will segfault.
+ */
+static bool
+extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+ "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+ "true" : "false", arena_ind);
+ expect_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
+ "Wrong hook function");
+ called_dalloc = true;
+ if (!try_dalloc) {
+ return true;
+ }
+ did_dalloc = true;
+ if (!maps_coalesce && opt_retain) {
+ return true;
+ }
+ pages_unmap(addr, size);
+ return false;
+}
+
+static extent_hooks_t hooks_orig;
+
+static extent_hooks_t hooks_unmap = {
+ extent_alloc_hook,
+ extent_dalloc_unmap, /* dalloc */
+ extent_destroy_hook,
+ extent_commit_hook,
+ extent_decommit_hook,
+ extent_purge_lazy_hook,
+ extent_purge_forced_hook,
+ extent_split_hook,
+ extent_merge_hook
+};
+
+TEST_BEGIN(test_arena_destroy_hooks_unmap) {
+ unsigned arena_ind;
+ void **ptrs;
+ unsigned nptrs;
+
+ extent_hooks_prep();
+ if (maps_coalesce) {
+ try_decommit = false;
+ }
+ memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+ memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
+
+ did_alloc = false;
+ arena_ind = do_arena_create(&hooks);
+ do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+
+ expect_true(did_alloc, "Expected alloc");
+
+ expect_false(arena_i_initialized(arena_ind, false),
+ "Arena stats should not be initialized");
+ expect_true(arena_i_initialized(arena_ind, true),
+ "Arena stats should be initialized");
+
+ did_dalloc = false;
+ do_arena_destroy(arena_ind);
+ expect_true(did_dalloc, "Expected dalloc");
+
+ expect_false(arena_i_initialized(arena_ind, true),
+ "Arena stats should not be initialized");
+ expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ "Destroyed arena stats should be initialized");
+
+ do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+ memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_arena_reset,
+ test_arena_destroy_initial,
+ test_arena_destroy_hooks_default,
+ test_arena_destroy_hooks_unmap);
+}
diff --git a/deps/jemalloc/test/unit/arena_reset_prof.c b/deps/jemalloc/test/unit/arena_reset_prof.c
new file mode 100644
index 0000000..38d8012
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset_prof.c
@@ -0,0 +1,4 @@
+#include "test/jemalloc_test.h"
+#define ARENA_RESET_PROF_C_
+
+#include "arena_reset.c"
diff --git a/deps/jemalloc/test/unit/arena_reset_prof.sh b/deps/jemalloc/test/unit/arena_reset_prof.sh
new file mode 100644
index 0000000..041dc1c
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset_prof.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="prof:true,lg_prof_sample:0"
diff --git a/deps/jemalloc/test/unit/atomic.c b/deps/jemalloc/test/unit/atomic.c
new file mode 100644
index 0000000..c2ec8c7
--- /dev/null
+++ b/deps/jemalloc/test/unit/atomic.c
@@ -0,0 +1,229 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for
+ * bool, etc.). The one exception is that the short name for void * is "p" in
+ * some places and "ptr" in others. In the long run it would be nice to unify
+ * these, but in the short run we'll use this shim.
+ */
+#define expect_p_eq expect_ptr_eq
+
+/*
+ * t: the non-atomic type, like "uint32_t".
+ * ta: the short name for the type, like "u32".
+ * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected,
+ * and val3 for desired.
+ */
+
+#define DO_TESTS(t, ta, val1, val2, val3) do { \
+ t val; \
+ t expected; \
+ bool success; \
+ /* This (along with the load below) also tests ATOMIC_LOAD. */ \
+ atomic_##ta##_t atom = ATOMIC_INIT(val1); \
+ \
+ /* ATOMIC_INIT and load. */ \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, "Load or init failed"); \
+ \
+ /* Store. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val2, val, "Store failed"); \
+ \
+ /* Exchange. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, "Exchange returned invalid value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val2, val, "Exchange store invalid value"); \
+ \
+ /* \
+ * Weak CAS. Spurious failures are allowed, so we loop a few \
+ * times. \
+ */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ success = false; \
+ for (int retry = 0; retry < 10 && !success; retry++) { \
+ expected = val2; \
+ success = atomic_compare_exchange_weak_##ta(&atom, \
+ &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, expected, \
+ "CAS should update expected"); \
+ } \
+ expect_b_eq(val1 == val2, success, \
+ "Weak CAS did the wrong state update"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ if (success) { \
+ expect_##ta##_eq(val3, val, \
+ "Successful CAS should update atomic"); \
+ } else { \
+ expect_##ta##_eq(val1, val, \
+ "Unsuccessful CAS should not update atomic"); \
+ } \
+ \
+ /* Strong CAS. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ expected = val2; \
+ success = atomic_compare_exchange_strong_##ta(&atom, &expected, \
+ val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
+ expect_b_eq(val1 == val2, success, \
+ "Strong CAS did the wrong state update"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ if (success) { \
+ expect_##ta##_eq(val3, val, \
+ "Successful CAS should update atomic"); \
+ } else { \
+ expect_##ta##_eq(val1, val, \
+ "Unsuccessful CAS should not update atomic"); \
+ } \
+ \
+ \
+} while (0)
+
+#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \
+ atomic_##ta##_t atom; \
+ t val; \
+ \
+ /* Fetch-add. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, \
+ "Fetch-add should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1 + val2, val, \
+ "Fetch-add should update atomic"); \
+ \
+ /* Fetch-sub. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, \
+ "Fetch-sub should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1 - val2, val, \
+ "Fetch-sub should update atomic"); \
+ \
+ /* Fetch-and. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, \
+ "Fetch-and should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1 & val2, val, \
+ "Fetch-and should update atomic"); \
+ \
+ /* Fetch-or. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, \
+ "Fetch-or should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1 | val2, val, \
+ "Fetch-or should update atomic"); \
+ \
+ /* Fetch-xor. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1, val, \
+ "Fetch-xor should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ expect_##ta##_eq(val1 ^ val2, val, \
+ "Fetch-xor should update atomic"); \
+} while (0)
+
+#define TEST_STRUCT(t, ta) \
+typedef struct { \
+ t val1; \
+ t val2; \
+ t val3; \
+} ta##_test_t;
+
+#define TEST_CASES(t) { \
+ {(t)-1, (t)-1, (t)-2}, \
+ {(t)-1, (t) 0, (t)-2}, \
+ {(t)-1, (t) 1, (t)-2}, \
+ \
+ {(t) 0, (t)-1, (t)-2}, \
+ {(t) 0, (t) 0, (t)-2}, \
+ {(t) 0, (t) 1, (t)-2}, \
+ \
+ {(t) 1, (t)-1, (t)-2}, \
+ {(t) 1, (t) 0, (t)-2}, \
+ {(t) 1, (t) 1, (t)-2}, \
+ \
+ {(t)0, (t)-(1 << 22), (t)-2}, \
+ {(t)0, (t)(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)(1 << 22), (t)-2} \
+}
+
+#define TEST_BODY(t, ta) do { \
+ const ta##_test_t tests[] = TEST_CASES(t); \
+ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
+ ta##_test_t test = tests[i]; \
+ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
+ } \
+} while (0)
+
+#define INTEGER_TEST_BODY(t, ta) do { \
+ const ta##_test_t tests[] = TEST_CASES(t); \
+ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
+ ta##_test_t test = tests[i]; \
+ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
+ DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \
+ } \
+} while (0)
+
+TEST_STRUCT(uint64_t, u64);
+TEST_BEGIN(test_atomic_u64) {
+#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+ test_skip("64-bit atomic operations not supported");
+#else
+ INTEGER_TEST_BODY(uint64_t, u64);
+#endif
+}
+TEST_END
+
+
+TEST_STRUCT(uint32_t, u32);
+TEST_BEGIN(test_atomic_u32) {
+ INTEGER_TEST_BODY(uint32_t, u32);
+}
+TEST_END
+
+TEST_STRUCT(void *, p);
+TEST_BEGIN(test_atomic_p) {
+ TEST_BODY(void *, p);
+}
+TEST_END
+
+TEST_STRUCT(size_t, zu);
+TEST_BEGIN(test_atomic_zu) {
+ INTEGER_TEST_BODY(size_t, zu);
+}
+TEST_END
+
+TEST_STRUCT(ssize_t, zd);
+TEST_BEGIN(test_atomic_zd) {
+ INTEGER_TEST_BODY(ssize_t, zd);
+}
+TEST_END
+
+
+TEST_STRUCT(unsigned, u);
+TEST_BEGIN(test_atomic_u) {
+ INTEGER_TEST_BODY(unsigned, u);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_atomic_u64,
+ test_atomic_u32,
+ test_atomic_p,
+ test_atomic_zu,
+ test_atomic_zd,
+ test_atomic_u);
+}
diff --git a/deps/jemalloc/test/unit/background_thread.c b/deps/jemalloc/test/unit/background_thread.c
new file mode 100644
index 0000000..c60010a
--- /dev/null
+++ b/deps/jemalloc/test/unit/background_thread.c
@@ -0,0 +1,118 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/util.h"
+
+static void
+test_switch_background_thread_ctl(bool new_val) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ e1 = new_val;
+ expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ &e1, sz), 0, "Unexpected mallctl() failure");
+ expect_b_eq(e0, !e1,
+ "background_thread should be %d before.\n", !e1);
+ if (e1) {
+ expect_zu_gt(n_background_threads, 0,
+ "Number of background threads should be non zero.\n");
+ } else {
+ expect_zu_eq(n_background_threads, 0,
+ "Number of background threads should be zero.\n");
+ }
+}
+
+static void
+test_repeat_background_thread_ctl(bool before) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ e1 = before;
+ expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ &e1, sz), 0, "Unexpected mallctl() failure");
+ expect_b_eq(e0, before,
+ "background_thread should be %d.\n", before);
+ if (e1) {
+ expect_zu_gt(n_background_threads, 0,
+ "Number of background threads should be non zero.\n");
+ } else {
+ expect_zu_eq(n_background_threads, 0,
+ "Number of background threads should be zero.\n");
+ }
+}
+
+TEST_BEGIN(test_background_thread_ctl) {
+ test_skip_if(!have_background_thread);
+
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctl("background_thread", (void *)&e1, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ expect_b_eq(e0, e1,
+ "Default and opt.background_thread does not match.\n");
+ if (e0) {
+ test_switch_background_thread_ctl(false);
+ }
+ expect_zu_eq(n_background_threads, 0,
+ "Number of background threads should be 0.\n");
+
+ for (unsigned i = 0; i < 4; i++) {
+ test_switch_background_thread_ctl(true);
+ test_repeat_background_thread_ctl(true);
+ test_repeat_background_thread_ctl(true);
+
+ test_switch_background_thread_ctl(false);
+ test_repeat_background_thread_ctl(false);
+ test_repeat_background_thread_ctl(false);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_background_thread_running) {
+ test_skip_if(!have_background_thread);
+ test_skip_if(!config_stats);
+
+#if defined(JEMALLOC_BACKGROUND_THREAD)
+ tsd_t *tsd = tsd_fetch();
+ background_thread_info_t *info = &background_thread_info[0];
+
+ test_repeat_background_thread_ctl(false);
+ test_switch_background_thread_ctl(true);
+ expect_b_eq(info->state, background_thread_started,
+ "Background_thread did not start.\n");
+
+ nstime_t start;
+ nstime_init_update(&start);
+
+ bool ran = false;
+ while (true) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ if (info->tot_n_runs > 0) {
+ ran = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ if (ran) {
+ break;
+ }
+
+ nstime_t now;
+ nstime_init_update(&now);
+ nstime_subtract(&now, &start);
+ expect_u64_lt(nstime_sec(&now), 1000,
+ "Background threads did not run for 1000 seconds.");
+ sleep(1);
+ }
+ test_switch_background_thread_ctl(false);
+#endif
+}
+TEST_END
+
+int
+main(void) {
+ /* Background_thread creation tests reentrancy naturally. */
+ return test_no_reentrancy(
+ test_background_thread_ctl,
+ test_background_thread_running);
+}
diff --git a/deps/jemalloc/test/unit/background_thread_enable.c b/deps/jemalloc/test/unit/background_thread_enable.c
new file mode 100644
index 0000000..44034ac
--- /dev/null
+++ b/deps/jemalloc/test/unit/background_thread_enable.c
@@ -0,0 +1,96 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20";
+
+static unsigned
+max_test_narenas(void) {
+ /*
+ * 10 here is somewhat arbitrary, except insofar as we want to ensure
+ * that the number of background threads is smaller than the number of
+ * arenas. I'll ragequit long before we have to spin up 10 threads per
+ * cpu to handle background purging, so this is a conservative
+ * approximation.
+ */
+ unsigned ret = 10 * ncpus;
+ /* Limit the max to avoid VM exhaustion on 32-bit platforms. */
+ if (ret > 512) {
+ ret = 512;
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_deferred) {
+ test_skip_if(!have_background_thread);
+
+ unsigned id;
+ size_t sz_u = sizeof(unsigned);
+
+ for (unsigned i = 0; i < max_test_narenas(); i++) {
+ expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ "Failed to create arena");
+ }
+
+ bool enable = true;
+ size_t sz_b = sizeof(bool);
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to enable background threads");
+ enable = false;
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to disable background threads");
+}
+TEST_END
+
+TEST_BEGIN(test_max_background_threads) {
+ test_skip_if(!have_background_thread);
+
+ size_t max_n_thds;
+ size_t opt_max_n_thds;
+ size_t sz_m = sizeof(max_n_thds);
+ expect_d_eq(mallctl("opt.max_background_threads",
+ &opt_max_n_thds, &sz_m, NULL, 0), 0,
+ "Failed to get opt.max_background_threads");
+ expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
+ 0), 0, "Failed to get max background threads");
+ expect_zu_eq(opt_max_n_thds, max_n_thds,
+ "max_background_threads and "
+ "opt.max_background_threads should match");
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
+ sz_m), 0, "Failed to set max background threads");
+
+ unsigned id;
+ size_t sz_u = sizeof(unsigned);
+
+ for (unsigned i = 0; i < max_test_narenas(); i++) {
+ expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ "Failed to create arena");
+ }
+
+ bool enable = true;
+ size_t sz_b = sizeof(bool);
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to enable background threads");
+ expect_zu_eq(n_background_threads, max_n_thds,
+ "Number of background threads should not change.\n");
+ size_t new_max_thds = max_n_thds - 1;
+ if (new_max_thds > 0) {
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL,
+ &new_max_thds, sz_m), 0,
+ "Failed to set max background threads");
+ expect_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should decrease by 1.\n");
+ }
+ new_max_thds = 1;
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
+ sz_m), 0, "Failed to set max background threads");
+ expect_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should be 1.\n");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_deferred,
+ test_max_background_threads);
+}
diff --git a/deps/jemalloc/test/unit/base.c b/deps/jemalloc/test/unit/base.c
new file mode 100644
index 0000000..15e04a8
--- /dev/null
+++ b/deps/jemalloc/test/unit/base.c
@@ -0,0 +1,265 @@
+#include "test/jemalloc_test.h"
+
+#include "test/extent_hooks.h"
+
+static extent_hooks_t hooks_null = {
+ extent_alloc_hook,
+ NULL, /* dalloc */
+ NULL, /* destroy */
+ NULL, /* commit */
+ NULL, /* decommit */
+ NULL, /* purge_lazy */
+ NULL, /* purge_forced */
+ NULL, /* split */
+ NULL /* merge */
+};
+
+static extent_hooks_t hooks_not_null = {
+ extent_alloc_hook,
+ extent_dalloc_hook,
+ extent_destroy_hook,
+ NULL, /* commit */
+ extent_decommit_hook,
+ extent_purge_lazy_hook,
+ extent_purge_forced_hook,
+ NULL, /* split */
+ NULL /* merge */
+};
+
+TEST_BEGIN(test_base_hooks_default) {
+ base_t *base;
+ size_t allocated0, allocated1, resident, mapped, n_thp;
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0,
+ (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
+ &n_thp);
+ expect_zu_ge(allocated0, sizeof(base_t),
+ "Base header should count as allocated");
+ if (opt_metadata_thp == metadata_thp_always) {
+ expect_zu_gt(n_thp, 0,
+ "Base should have 1 THP at least.");
+ }
+ }
+
+ expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ "Unexpected base_alloc() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
+ &n_thp);
+ expect_zu_ge(allocated1 - allocated0, 42,
+ "At least 42 bytes were allocated by base_alloc()");
+ }
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+TEST_BEGIN(test_base_hooks_null) {
+ extent_hooks_t hooks_orig;
+ base_t *base;
+ size_t allocated0, allocated1, resident, mapped, n_thp;
+
+ extent_hooks_prep();
+ try_dalloc = false;
+ try_destroy = true;
+ try_decommit = false;
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+ memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
+ &n_thp);
+ expect_zu_ge(allocated0, sizeof(base_t),
+ "Base header should count as allocated");
+ if (opt_metadata_thp == metadata_thp_always) {
+ expect_zu_gt(n_thp, 0,
+ "Base should have 1 THP at least.");
+ }
+ }
+
+ expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ "Unexpected base_alloc() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
+ &n_thp);
+ expect_zu_ge(allocated1 - allocated0, 42,
+ "At least 42 bytes were allocated by base_alloc()");
+ }
+
+ base_delete(tsdn, base);
+
+ memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+TEST_BEGIN(test_base_hooks_not_null) {
+ extent_hooks_t hooks_orig;
+ base_t *base;
+ void *p, *q, *r, *r_exp;
+
+ extent_hooks_prep();
+ try_dalloc = false;
+ try_destroy = true;
+ try_decommit = false;
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ did_alloc = false;
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new() failure");
+ expect_true(did_alloc, "Expected alloc");
+
+ /*
+ * Check for tight packing at specified alignment under simple
+ * conditions.
+ */
+ {
+ const size_t alignments[] = {
+ 1,
+ QUANTUM,
+ QUANTUM << 1,
+ CACHELINE,
+ CACHELINE << 1,
+ };
+ unsigned i;
+
+ for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
+ size_t alignment = alignments[i];
+ size_t align_ceil = ALIGNMENT_CEILING(alignment,
+ QUANTUM);
+ p = base_alloc(tsdn, base, 1, alignment);
+ expect_ptr_not_null(p,
+ "Unexpected base_alloc() failure");
+ expect_ptr_eq(p,
+ (void *)(ALIGNMENT_CEILING((uintptr_t)p,
+ alignment)), "Expected quantum alignment");
+ q = base_alloc(tsdn, base, alignment, alignment);
+ expect_ptr_not_null(q,
+ "Unexpected base_alloc() failure");
+ expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
+ "Minimal allocation should take up %zu bytes",
+ align_ceil);
+ r = base_alloc(tsdn, base, 1, alignment);
+ expect_ptr_not_null(r,
+ "Unexpected base_alloc() failure");
+ expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
+ "Minimal allocation should take up %zu bytes",
+ align_ceil);
+ }
+ }
+
+ /*
+ * Allocate an object that cannot fit in the first block, then verify
+ * that the first block's remaining space is considered for subsequent
+ * allocation.
+ */
+ expect_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
+ "Remainder insufficient for test");
+ /* Use up all but one quantum of block. */
+ while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
+ p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
+ expect_ptr_not_null(p, "Unexpected base_alloc() failure");
+ }
+ r_exp = edata_addr_get(&base->blocks->edata);
+ expect_zu_eq(base->extent_sn_next, 1, "One extant block expected");
+ q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
+ expect_ptr_not_null(q, "Unexpected base_alloc() failure");
+ expect_ptr_ne(q, r_exp, "Expected allocation from new block");
+ expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+ r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
+ expect_ptr_not_null(r, "Unexpected base_alloc() failure");
+ expect_ptr_eq(r, r_exp, "Expected allocation from first block");
+ expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+
+ /*
+ * Check for proper alignment support when normal blocks are too small.
+ */
+ {
+ const size_t alignments[] = {
+ HUGEPAGE,
+ HUGEPAGE << 1
+ };
+ unsigned i;
+
+ for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
+ size_t alignment = alignments[i];
+ p = base_alloc(tsdn, base, QUANTUM, alignment);
+ expect_ptr_not_null(p,
+ "Unexpected base_alloc() failure");
+ expect_ptr_eq(p,
+ (void *)(ALIGNMENT_CEILING((uintptr_t)p,
+ alignment)), "Expected %zu-byte alignment",
+ alignment);
+ }
+ }
+
+ called_dalloc = called_destroy = called_decommit = called_purge_lazy =
+ called_purge_forced = false;
+ base_delete(tsdn, base);
+ expect_true(called_dalloc, "Expected dalloc call");
+ expect_true(!called_destroy, "Unexpected destroy call");
+ expect_true(called_decommit, "Expected decommit call");
+ expect_true(called_purge_lazy, "Expected purge_lazy call");
+ expect_true(called_purge_forced, "Expected purge_forced call");
+
+ try_dalloc = true;
+ try_destroy = true;
+ try_decommit = true;
+ try_purge_lazy = true;
+ try_purge_forced = true;
+ memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+TEST_BEGIN(test_base_ehooks_get_for_metadata_default_hook) {
+ extent_hooks_prep();
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+ base_t *base;
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ false);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
+ expect_true(ehooks_are_default(ehooks),
+ "Expected default extent hook functions pointer");
+ base_delete(tsdn, base);
+}
+TEST_END
+
+TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) {
+ extent_hooks_prep();
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+ base_t *base;
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
+ expect_ptr_eq(&hooks, ehooks_get_extent_hooks_ptr(ehooks),
+	    "Expected user-specified extent hook functions pointer");
+ base_delete(tsdn, base);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_base_hooks_default,
+ test_base_hooks_null,
+ test_base_hooks_not_null,
+ test_base_ehooks_get_for_metadata_default_hook,
+ test_base_ehooks_get_for_metadata_custom_hook);
+}
diff --git a/deps/jemalloc/test/unit/batch_alloc.c b/deps/jemalloc/test/unit/batch_alloc.c
new file mode 100644
index 0000000..901c52b
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc.c
@@ -0,0 +1,189 @@
+#include "test/jemalloc_test.h"
+
+#define BATCH_MAX ((1U << 16) + 1024)
+static void *global_ptrs[BATCH_MAX];
+
+#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
+
+static void
+verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
+ bool zero) {
+ for (size_t i = 0; i < batch; ++i) {
+ void *p = ptrs[i];
+ expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
+ if (zero) {
+ for (size_t k = 0; k < usize; ++k) {
+ expect_true(*((unsigned char *)p + k) == 0, "");
+ }
+ }
+ }
+}
+
+static void
+verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
+ arena_t *arena, unsigned nregs) {
+ if (config_prof && opt_prof) {
+ /*
+ * Checking batch locality when prof is on is feasible but
+ * complicated, while checking the non-prof case suffices for
+	 * unit-test purposes.
+ */
+ return;
+ }
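+	/*
+	 * j tracks the position within a run of nregs allocations: each run
+	 * is expected to start at a page boundary (the beginning of a slab),
+	 * with subsequent pointers following each other at exactly usize
+	 * intervals.  A trailing incomplete run is skipped.
+	 */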
+ for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
+ if (j == nregs) {
+ j = 0;
+ }
+ if (j == 0 && batch - i < nregs) {
+ break;
+ }
+ void *p = ptrs[i];
+ expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
+ if (j == 0) {
+ expect_true(PAGE_ALIGNED(p), "");
+ continue;
+ }
+ assert(i > 0);
+ void *q = ptrs[i - 1];
+ expect_true((uintptr_t)p > (uintptr_t)q
+ && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
+ }
+}
+
+static void
+release_batch(void **ptrs, size_t batch, size_t size) {
+ for (size_t i = 0; i < batch; ++i) {
+ sdallocx(ptrs[i], size, 0);
+ }
+}
+
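+/*
+ * Write-side argument of the "experimental.batch_alloc" mallctl; the
+ * read-side output of the same mallctl reports how many slots were filled.
+ */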
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static size_t
+batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
+ batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
+ size_t filled;
+ size_t len = sizeof(size_t);
+ assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
+ &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
+ return filled;
+}
+
+static void
+test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd != NULL);
+ const size_t usize =
+ (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
+ const szind_t ind = sz_size2index(usize);
+ const bin_info_t *bin_info = &bin_infos[ind];
+ const unsigned nregs = bin_info->nregs;
+ assert(nregs > 0);
+ arena_t *arena;
+ if (arena_flag != 0) {
+ arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag),
+ false);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ }
+ assert(arena != NULL);
+ int flags = arena_flag;
+ if (alignment != 0) {
+ flags |= MALLOCX_ALIGN(alignment);
+ }
+ if (zero) {
+ flags |= MALLOCX_ZERO;
+ }
+
+ /*
+ * Allocate for the purpose of bootstrapping arena_tdata, so that the
+ * change in bin stats won't contaminate the stats to be verified below.
+ */
+ void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);
+
+ for (size_t i = 0; i < 4; ++i) {
+ size_t base = 0;
+ if (i == 1) {
+ base = nregs;
+ } else if (i == 2) {
+ base = nregs * 2;
+ } else if (i == 3) {
+ base = (1 << 16);
+ }
+ for (int j = -1; j <= 1; ++j) {
+ if (base == 0 && j == -1) {
+ continue;
+ }
+ size_t batch = base + (size_t)j;
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc_wrapper(global_ptrs, batch,
+ size, flags);
+ assert_zu_eq(filled, batch, "");
+ verify_batch_basic(tsd, global_ptrs, batch, usize,
+ zero);
+ verify_batch_locality(tsd, global_ptrs, batch, usize,
+ arena, nregs);
+ release_batch(global_ptrs, batch, usize);
+ }
+ }
+
+ free(p);
+}
+
+TEST_BEGIN(test_batch_alloc) {
+ test_wrapper(11, 0, false, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_zero) {
+ test_wrapper(11, 0, true, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_aligned) {
+ test_wrapper(7, 16, false, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_manual_arena) {
+ unsigned arena_ind;
+ size_t len_unsigned = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
+ 0), 0, "");
+ test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_large) {
+ size_t size = SC_LARGE_MINCLASS;
+ for (size_t batch = 0; batch < 4; ++batch) {
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc(global_ptrs, batch, size, 0);
+ assert_zu_eq(filled, batch, "");
+ release_batch(global_ptrs, batch, size);
+ }
+ size = tcache_maxclass + 1;
+ for (size_t batch = 0; batch < 4; ++batch) {
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc(global_ptrs, batch, size, 0);
+ assert_zu_eq(filled, batch, "");
+ release_batch(global_ptrs, batch, size);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_batch_alloc,
+ test_batch_alloc_zero,
+ test_batch_alloc_aligned,
+ test_batch_alloc_manual_arena,
+ test_batch_alloc_large);
+}
diff --git a/deps/jemalloc/test/unit/batch_alloc.sh b/deps/jemalloc/test/unit/batch_alloc.sh
new file mode 100644
index 0000000..9d81010
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache_gc_incr_bytes:2147483648"
diff --git a/deps/jemalloc/test/unit/batch_alloc_prof.c b/deps/jemalloc/test/unit/batch_alloc_prof.c
new file mode 100644
index 0000000..ef64458
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc_prof.c
@@ -0,0 +1 @@
+#include "batch_alloc.c"
diff --git a/deps/jemalloc/test/unit/batch_alloc_prof.sh b/deps/jemalloc/test/unit/batch_alloc_prof.sh
new file mode 100644
index 0000000..a2697a6
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc_prof.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="prof:true,lg_prof_sample:14"
diff --git a/deps/jemalloc/test/unit/binshard.c b/deps/jemalloc/test/unit/binshard.c
new file mode 100644
index 0000000..040ea54
--- /dev/null
+++ b/deps/jemalloc/test/unit/binshard.c
@@ -0,0 +1,154 @@
+#include "test/jemalloc_test.h"
+
+/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */
+
+#define NTHREADS 16
+#define REMOTE_NALLOC 256
+
+static void *
+thd_producer(void *varg) {
+ void **mem = varg;
+ unsigned arena, i;
+ size_t sz;
+
+ sz = sizeof(arena);
+ /* Remote arena. */
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ for (i = 0; i < REMOTE_NALLOC / 2; i++) {
+ mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
+ }
+
+ /* Remote bin. */
+ for (; i < REMOTE_NALLOC; i++) {
+ mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0));
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_producer_consumer) {
+ thd_t thds[NTHREADS];
+ void *mem[NTHREADS][REMOTE_NALLOC];
+ unsigned i;
+
+ /* Create producer threads to allocate. */
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_producer, mem[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+ /* Remote deallocation by the current thread. */
+ for (i = 0; i < NTHREADS; i++) {
+ for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
+ expect_ptr_not_null(mem[i][j],
+ "Unexpected remote allocation failure");
+ dallocx(mem[i][j], 0);
+ }
+ }
+}
+TEST_END
+
+static void *
+thd_start(void *varg) {
+ void *ptr, *ptr2;
+ edata_t *edata;
+ unsigned shard1, shard2;
+
+ tsdn_t *tsdn = tsdn_fetch();
+ /* Try triggering allocations from sharded bins. */
+ for (unsigned i = 0; i < 1024; i++) {
+ ptr = mallocx(1, MALLOCX_TCACHE_NONE);
+ ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
+
+ edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ shard1 = edata_binshard_get(edata);
+ dallocx(ptr, 0);
+ expect_u_lt(shard1, 16, "Unexpected bin shard used");
+
+ edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr2);
+ shard2 = edata_binshard_get(edata);
+ dallocx(ptr2, 0);
+ expect_u_lt(shard2, 4, "Unexpected bin shard used");
+
+ if (shard1 > 0 || shard2 > 0) {
+ /* Triggered sharded bin usage. */
+ return (void *)(uintptr_t)shard1;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_bin_shard_mt) {
+ test_skip_if(have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena));
+
+ thd_t thds[NTHREADS];
+ unsigned i;
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start, NULL);
+ }
+ bool sharded = false;
+ for (i = 0; i < NTHREADS; i++) {
+ void *ret;
+ thd_join(thds[i], &ret);
+ if (ret != NULL) {
+ sharded = true;
+ }
+ }
+ expect_b_eq(sharded, true, "Did not find sharded bins");
+}
+TEST_END
+
+TEST_BEGIN(test_bin_shard) {
+ unsigned nbins, i;
+ size_t mib[4], mib2[4];
+ size_t miblen, miblen2, len;
+
+ len = sizeof(nbins);
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ miblen2 = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ for (i = 0; i < nbins; i++) {
+ uint32_t nshards;
+ size_t size, sz1, sz2;
+
+ mib[2] = i;
+ sz1 = sizeof(nshards);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+
+ mib2[2] = i;
+ sz2 = sizeof(size);
+ expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+
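+		/*
+		 * Expected shard counts for the bin_shards config above: 16
+		 * for sizes up to 128, 8 for the 256-byte bin, 4 for the
+		 * remaining sizes up to 512, and 1 for everything larger.
+		 */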
+ if (size >= 1 && size <= 128) {
+ expect_u_eq(nshards, 16, "Unexpected nshards");
+ } else if (size == 256) {
+ expect_u_eq(nshards, 8, "Unexpected nshards");
+ } else if (size > 128 && size <= 512) {
+ expect_u_eq(nshards, 4, "Unexpected nshards");
+ } else {
+ expect_u_eq(nshards, 1, "Unexpected nshards");
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_bin_shard,
+ test_bin_shard_mt,
+ test_producer_consumer);
+}
diff --git a/deps/jemalloc/test/unit/binshard.sh b/deps/jemalloc/test/unit/binshard.sh
new file mode 100644
index 0000000..c1d58c8
--- /dev/null
+++ b/deps/jemalloc/test/unit/binshard.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8"
diff --git a/deps/jemalloc/test/unit/bit_util.c b/deps/jemalloc/test/unit/bit_util.c
new file mode 100644
index 0000000..7d31b21
--- /dev/null
+++ b/deps/jemalloc/test/unit/bit_util.c
@@ -0,0 +1,307 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/bit_util.h"
+
+#define TEST_POW2_CEIL(t, suf, pri) do { \
+ unsigned i, pow2; \
+ t x; \
+ \
+ expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ \
+ for (i = 0; i < sizeof(t) * 8; i++) { \
+ expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+ << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 2; i < sizeof(t) * 8; i++) { \
+ expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+ ((t)1) << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
+ expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+ ((t)1) << (i+1), "Unexpected result"); \
+ } \
+ \
+ for (pow2 = 1; pow2 < 25; pow2++) { \
+ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+ x++) { \
+ expect_##suf##_eq(pow2_ceil_##suf(x), \
+ ((t)1) << pow2, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64) {
+ TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_u32) {
+ TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_zu) {
+ TEST_POW2_CEIL(size_t, zu, "zu");
+}
+TEST_END
+
+void
+expect_lg_ceil_range(size_t input, unsigned answer) {
+ if (input == 1) {
+ expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
+ return;
+ }
+ expect_zu_le(input, (ZU(1) << answer),
+ "Got %u as lg_ceil of %zu", answer, input);
+ expect_zu_gt(input, (ZU(1) << (answer - 1)),
+ "Got %u as lg_ceil of %zu", answer, input);
+}
+
+void
+expect_lg_floor_range(size_t input, unsigned answer) {
+ if (input == 1) {
+ expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
+ return;
+ }
+ expect_zu_ge(input, (ZU(1) << answer),
+ "Got %u as lg_floor of %zu", answer, input);
+ expect_zu_lt(input, (ZU(1) << (answer + 1)),
+ "Got %u as lg_floor of %zu", answer, input);
+}
+
+TEST_BEGIN(test_lg_ceil_floor) {
+ for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
+ expect_lg_ceil_range(i, lg_ceil(i));
+ expect_lg_ceil_range(i, LG_CEIL(i));
+ expect_lg_floor_range(i, lg_floor(i));
+ expect_lg_floor_range(i, LG_FLOOR(i));
+ }
+ for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
+ for (size_t j = 0; j < (1 << 4); j++) {
+ size_t num1 = ((size_t)1 << i)
+ - j * ((size_t)1 << (i - 4));
+ size_t num2 = ((size_t)1 << i)
+ + j * ((size_t)1 << (i - 4));
+ expect_zu_ne(num1, 0, "Invalid lg argument");
+ expect_zu_ne(num2, 0, "Invalid lg argument");
+ expect_lg_ceil_range(num1, lg_ceil(num1));
+ expect_lg_ceil_range(num1, LG_CEIL(num1));
+ expect_lg_ceil_range(num2, lg_ceil(num2));
+ expect_lg_ceil_range(num2, LG_CEIL(num2));
+
+ expect_lg_floor_range(num1, lg_floor(num1));
+ expect_lg_floor_range(num1, LG_FLOOR(num1));
+ expect_lg_floor_range(num2, lg_floor(num2));
+ expect_lg_floor_range(num2, LG_FLOOR(num2));
+ }
+ }
+}
+TEST_END
+
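+/*
+ * For every combination of bit positions i >= j >= k, set those three bits
+ * and verify that ffs reports the lowest one (k).
+ */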
+#define TEST_FFS(t, suf, test_suf, pri) do { \
+ for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
+ for (unsigned j = 0; j <= i; j++) { \
+ for (unsigned k = 0; k <= j; k++) { \
+ t x = (t)1 << i; \
+ x |= (t)1 << j; \
+ x |= (t)1 << k; \
+ expect_##test_suf##_eq(ffs_##suf(x), k, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+ } \
+} while(0)
+
+TEST_BEGIN(test_ffs_u) {
+	TEST_FFS(unsigned, u, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_lu) {
+ TEST_FFS(unsigned long, lu, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_llu) {
+ TEST_FFS(unsigned long long, llu, qd, "llu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u32) {
+ TEST_FFS(uint32_t, u32, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u64) {
+ TEST_FFS(uint64_t, u64, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_zu) {
+ TEST_FFS(size_t, zu, zu, "zu");
+}
+TEST_END
+
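+/*
+ * Same construction as TEST_FFS, except that fls must report the highest set
+ * bit (i).
+ */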
+#define TEST_FLS(t, suf, test_suf, pri) do { \
+ for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
+ for (unsigned j = 0; j <= i; j++) { \
+ for (unsigned k = 0; k <= j; k++) { \
+ t x = (t)1 << i; \
+ x |= (t)1 << j; \
+ x |= (t)1 << k; \
+ expect_##test_suf##_eq(fls_##suf(x), i, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+ } \
+} while(0)
+
+TEST_BEGIN(test_fls_u) {
+	TEST_FLS(unsigned, u, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_lu) {
+ TEST_FLS(unsigned long, lu, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_llu) {
+ TEST_FLS(unsigned long long, llu, qd, "llu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u32) {
+ TEST_FLS(uint32_t, u32, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u64) {
+ TEST_FLS(uint64_t, u64, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_fls_zu) {
+ TEST_FLS(size_t, zu, zu, "zu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u_slow) {
+	TEST_FLS(unsigned, u_slow, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_lu_slow) {
+ TEST_FLS(unsigned long, lu_slow, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_llu_slow) {
+ TEST_FLS(unsigned long long, llu_slow, qd, "llu");
+}
+TEST_END
+
+static unsigned
+popcount_byte(unsigned byte) {
+ int count = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((byte & (1 << i)) != 0) {
+ count++;
+ }
+ }
+ return count;
+}
+
+static uint64_t
+expand_byte_to_mask(unsigned byte) {
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((byte & (1 << i)) != 0) {
+ result |= ((uint64_t)0xFF << (i * 8));
+ }
+ }
+ return result;
+}
+
+#define TEST_POPCOUNT(t, suf, pri_hex) do { \
+ t bmul = (t)0x0101010101010101ULL; \
+ for (unsigned i = 0; i < (1 << sizeof(t)); i++) { \
+ for (unsigned j = 0; j < 256; j++) { \
+ /* \
+ * Replicate the byte j into various \
+ * bytes of the integer (as indicated by the \
+ * mask in i), and ensure that the popcount of \
+ * the result is popcount(i) * popcount(j) \
+ */ \
+ t mask = (t)expand_byte_to_mask(i); \
+ t x = (bmul * j) & mask; \
+ expect_u_eq( \
+ popcount_byte(i) * popcount_byte(j), \
+ popcount_##suf(x), \
+ "Unexpected result, x=0x%"pri_hex, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_popcount_u) {
+ TEST_POPCOUNT(unsigned, u, "x");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_u_slow) {
+ TEST_POPCOUNT(unsigned, u_slow, "x");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_lu) {
+ TEST_POPCOUNT(unsigned long, lu, "lx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_lu_slow) {
+ TEST_POPCOUNT(unsigned long, lu_slow, "lx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_llu) {
+ TEST_POPCOUNT(unsigned long long, llu, "llx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_llu_slow) {
+ TEST_POPCOUNT(unsigned long long, llu_slow, "llx");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_pow2_ceil_u64,
+ test_pow2_ceil_u32,
+ test_pow2_ceil_zu,
+ test_lg_ceil_floor,
+ test_ffs_u,
+ test_ffs_lu,
+ test_ffs_llu,
+ test_ffs_u32,
+ test_ffs_u64,
+ test_ffs_zu,
+ test_fls_u,
+ test_fls_lu,
+ test_fls_llu,
+ test_fls_u32,
+ test_fls_u64,
+ test_fls_zu,
+ test_fls_u_slow,
+ test_fls_lu_slow,
+ test_fls_llu_slow,
+ test_popcount_u,
+ test_popcount_u_slow,
+ test_popcount_lu,
+ test_popcount_lu_slow,
+ test_popcount_llu,
+ test_popcount_llu_slow);
+}
diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
new file mode 100644
index 0000000..78e542b
--- /dev/null
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -0,0 +1,343 @@
+#include "test/jemalloc_test.h"
+
+#include "test/nbits.h"
+
+static void
+test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
+ bitmap_info_t binfo_dyn;
+ bitmap_info_init(&binfo_dyn, nbits);
+
+ expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+ expect_zu_eq(binfo->nbits, binfo_dyn.nbits,
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+#ifdef BITMAP_USE_TREE
+ expect_u_eq(binfo->nlevels, binfo_dyn.nlevels,
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+ {
+ unsigned i;
+
+ for (i = 0; i < binfo->nlevels; i++) {
+ expect_zu_eq(binfo->levels[i].group_offset,
+ binfo_dyn.levels[i].group_offset,
+ "Unexpected difference between static and dynamic "
+ "initialization, nbits=%zu, level=%u", nbits, i);
+ }
+ }
+#else
+ expect_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
+ "Unexpected difference between static and dynamic initialization");
+#endif
+}
+
+TEST_BEGIN(test_bitmap_initializer) {
+#define NB(nbits) { \
+ if (nbits <= BITMAP_MAXBITS) { \
+ bitmap_info_t binfo = \
+ BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_initializer_body(&binfo, nbits); \
+ } \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static size_t
+test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
+ size_t prev_size) {
+ size_t size = bitmap_size(binfo);
+ expect_zu_ge(size, (nbits >> 3),
+ "Bitmap size is smaller than expected");
+ expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
+ return size;
+}
+
+TEST_BEGIN(test_bitmap_size) {
+ size_t nbits, prev_size;
+
+ prev_size = 0;
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ prev_size = test_bitmap_size_body(&binfo, nbits, prev_size);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ prev_size = test_bitmap_size_body(&binfo, nbits, \
+ prev_size); \
+ }
+ prev_size = 0;
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
+
+ bitmap_init(bitmap, binfo, false);
+ for (i = 0; i < nbits; i++) {
+ expect_false(bitmap_get(bitmap, binfo, i),
+ "Bit should be unset");
+ }
+
+ bitmap_init(bitmap, binfo, true);
+ for (i = 0; i < nbits; i++) {
+ expect_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
+ }
+
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_init) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_init_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_init_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_set) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_set_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_set_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ for (i = 0; i < nbits; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ }
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_unset) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_unset_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_unset_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ /* Iteratively set bits starting at the beginning. */
+ for (size_t i = 0; i < nbits; i++) {
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ }
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
+
+ /*
+ * Iteratively unset bits starting at the end, and verify that
+ * bitmap_sfu() reaches the unset bits.
+ */
+ for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
+ bitmap_unset(bitmap, binfo, i);
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+		    "First unset bit should be the bit previously unset");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+		    "First unset bit should be the bit previously unset");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+		    "First unset bit should be the bit previously unset");
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
+		    "First unset bit should be the bit previously unset");
+ bitmap_unset(bitmap, binfo, i);
+ }
+ expect_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
+
+ /*
+ * Iteratively set bits starting at the beginning, and verify that
+ * bitmap_sfu() looks past them.
+ */
+ for (size_t i = 1; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i - 1);
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ bitmap_unset(bitmap, binfo, i);
+ }
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
+ "First unset bit should be the last bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
+ nbits - 1, "First unset bit should be the last bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
+ "First unset bit should be the last bit");
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
+ "First unset bit should be the last bit");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
+
+ /*
+ * Bubble a "usu" pattern through the bitmap and verify that
+ * bitmap_ffu() finds the correct bit for all five min_bit cases.
+ */
+ if (nbits >= 3) {
+ for (size_t i = 0; i < nbits-2; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ bitmap_unset(bitmap, binfo, i+2);
+ if (i > 0) {
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ "Unexpected first unset bit");
+ }
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "Unexpected first unset bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
+ "Unexpected first unset bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
+ "Unexpected first unset bit");
+ if (i + 3 < nbits) {
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
+ nbits, "Unexpected first unset bit");
+ }
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "Unexpected first unset bit");
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
+ "Unexpected first unset bit");
+ }
+ }
+
+ /*
+ * Unset the last bit, bubble another unset bit through the bitmap, and
+ * verify that bitmap_ffu() finds the correct bit for all four min_bit
+ * cases.
+ */
+ if (nbits >= 3) {
+ bitmap_unset(bitmap, binfo, nbits-1);
+ for (size_t i = 0; i < nbits-1; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ if (i > 0) {
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ "Unexpected first unset bit");
+ }
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "Unexpected first unset bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
+ "Unexpected first unset bit");
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
+ nbits-1, "Unexpected first unset bit");
+
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "Unexpected first unset bit");
+ }
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
+ "Unexpected first unset bit");
+ }
+
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_xfu) {
+ size_t nbits, nbits_max;
+
+ /* The test is O(n^2); large page sizes may slow down too much. */
+ nbits_max = BITMAP_MAXBITS > 512 ? 512 : BITMAP_MAXBITS;
+ for (nbits = 1; nbits <= nbits_max; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_xfu_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_xfu_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_bitmap_initializer,
+ test_bitmap_size,
+ test_bitmap_init,
+ test_bitmap_set,
+ test_bitmap_unset,
+ test_bitmap_xfu);
+}
diff --git a/deps/jemalloc/test/unit/buf_writer.c b/deps/jemalloc/test/unit/buf_writer.c
new file mode 100644
index 0000000..d5e63a0
--- /dev/null
+++ b/deps/jemalloc/test/unit/buf_writer.c
@@ -0,0 +1,196 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/buf_writer.h"
+
+#define TEST_BUF_SIZE 16
+#define UNIT_MAX (TEST_BUF_SIZE * 3)
+
+static size_t test_write_len;
+static char test_buf[TEST_BUF_SIZE];
+static uint64_t arg;
+static uint64_t arg_store;
+
+static void
+test_write_cb(void *cbopaque, const char *s) {
+ size_t prev_test_write_len = test_write_len;
+ test_write_len += strlen(s); /* only increase the length */
+ arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */
+ assert_zu_le(prev_test_write_len, test_write_len,
+ "Test write overflowed");
+}
+
+static void
+test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ char s[UNIT_MAX + 1];
+ size_t n_unit, remain, i;
+ ssize_t unit;
+
+ assert(buf_writer->buf != NULL);
+ memset(s, 'a', UNIT_MAX);
+ arg = 4; /* Starting value of random argument. */
+ arg_store = arg;
+ for (unit = UNIT_MAX; unit >= 0; --unit) {
+ /* unit keeps decreasing, so strlen(s) is always unit. */
+ s[unit] = '\0';
+ for (n_unit = 1; n_unit <= 3; ++n_unit) {
+ test_write_len = 0;
+ remain = 0;
+ for (i = 1; i <= n_unit; ++i) {
+ arg = prng_lg_range_u64(&arg, 64);
+ buf_writer_cb(buf_writer, s);
+ remain += unit;
+ if (remain > buf_writer->buf_size) {
+ /* Flushes should have happened. */
+					assert_u64_eq(arg_store, arg, "Callback "
+					    "argument didn't get through");
+ remain %= buf_writer->buf_size;
+ if (remain == 0) {
+ /* Last flush should be lazy. */
+ remain += buf_writer->buf_size;
+ }
+ }
+ assert_zu_eq(test_write_len + remain, i * unit,
+ "Incorrect length after writing %zu strings"
+ " of length %zu", i, unit);
+ }
+ buf_writer_flush(buf_writer);
+ expect_zu_eq(test_write_len, n_unit * unit,
+ "Incorrect length after flushing at the end of"
+ " writing %zu strings of length %zu", n_unit, unit);
+ }
+ }
+ buf_writer_terminate(tsdn, buf_writer);
+}
+
+TEST_BEGIN(test_buf_write_static) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ test_buf, TEST_BUF_SIZE),
+ "buf_writer_init() should not encounter error on static buffer");
+ test_buf_writer_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_dynamic) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM");
+ test_buf_writer_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_oom) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
+ assert(buf_writer.buf == NULL);
+
+ char s[UNIT_MAX + 1];
+ size_t n_unit, i;
+ ssize_t unit;
+
+ memset(s, 'a', UNIT_MAX);
+ arg = 4; /* Starting value of random argument. */
+ arg_store = arg;
+ for (unit = UNIT_MAX; unit >= 0; unit -= UNIT_MAX / 4) {
+ /* unit keeps decreasing, so strlen(s) is always unit. */
+ s[unit] = '\0';
+ for (n_unit = 1; n_unit <= 3; ++n_unit) {
+ test_write_len = 0;
+ for (i = 1; i <= n_unit; ++i) {
+ arg = prng_lg_range_u64(&arg, 64);
+ buf_writer_cb(&buf_writer, s);
+ assert_u64_eq(arg_store, arg,
+				    "Callback argument didn't get through");
+ assert_zu_eq(test_write_len, i * unit,
+ "Incorrect length after writing %zu strings"
+ " of length %zu", i, unit);
+ }
+ buf_writer_flush(&buf_writer);
+ expect_zu_eq(test_write_len, n_unit * unit,
+ "Incorrect length after flushing at the end of"
+ " writing %zu strings of length %zu", n_unit, unit);
+ }
+ }
+ buf_writer_terminate(tsdn, &buf_writer);
+}
+TEST_END
+
+static int test_read_count;
+static size_t test_read_len;
+static uint64_t arg_sum;
+
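+/*
+ * Read callback for buf_writer_pipe(): each call produces a partial read of
+ * random length, and the final call (once test_read_count reaches zero)
+ * returns -1 to signal end of input.
+ */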
+ssize_t
+test_read_cb(void *cbopaque, void *buf, size_t limit) {
+ static uint64_t rand = 4;
+
+ arg_sum += *(uint64_t *)cbopaque;
+ assert_zu_gt(limit, 0, "Limit for read_cb must be positive");
+ --test_read_count;
+ if (test_read_count == 0) {
+ return -1;
+ } else {
+ size_t read_len = limit;
+ if (limit > 1) {
+ rand = prng_range_u64(&rand, (uint64_t)limit);
+ read_len -= (size_t)rand;
+ }
+ assert(read_len > 0);
+ memset(buf, 'a', read_len);
+ size_t prev_test_read_len = test_read_len;
+ test_read_len += read_len;
+ assert_zu_le(prev_test_read_len, test_read_len,
+ "Test read overflowed");
+ return read_len;
+ }
+}
+
+static void
+test_buf_writer_pipe_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ arg = 4; /* Starting value of random argument. */
+ for (int count = 5; count > 0; --count) {
+ arg = prng_lg_range_u64(&arg, 64);
+ arg_sum = 0;
+ test_read_count = count;
+ test_read_len = 0;
+ test_write_len = 0;
+ buf_writer_pipe(buf_writer, test_read_cb, &arg);
+ assert(test_read_count == 0);
+ expect_u64_eq(arg_sum, arg * count, "");
+ expect_zu_eq(test_write_len, test_read_len,
+ "Write length should be equal to read length");
+ }
+ buf_writer_terminate(tsdn, buf_writer);
+}
+
+TEST_BEGIN(test_buf_write_pipe) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ test_buf, TEST_BUF_SIZE),
+ "buf_writer_init() should not encounter error on static buffer");
+ test_buf_writer_pipe_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_pipe_oom) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
+ test_buf_writer_pipe_body(tsdn, &buf_writer);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_buf_write_static,
+ test_buf_write_dynamic,
+ test_buf_write_oom,
+ test_buf_write_pipe,
+ test_buf_write_pipe_oom);
+}
diff --git a/deps/jemalloc/test/unit/cache_bin.c b/deps/jemalloc/test/unit/cache_bin.c
new file mode 100644
index 0000000..3b6dbab
--- /dev/null
+++ b/deps/jemalloc/test/unit/cache_bin.c
@@ -0,0 +1,384 @@
+#include "test/jemalloc_test.h"
+
+static void
+do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
+ cache_bin_sz_t nfill_succeed) {
+ bool success;
+ void *ptr;
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
+ cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
+ for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
+ arr.ptr[i] = &ptrs[i];
+ }
+ cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
+ "");
+ cache_bin_low_water_set(bin);
+
+ for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
+ ptr = cache_bin_alloc(bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, (void *)&ptrs[i],
+ "Should pop in order filled");
+ expect_true(cache_bin_low_water_get(bin, info)
+ == nfill_succeed - i - 1, "");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ expect_true(cache_bin_low_water_get(bin, info) == 0, "");
+}
+
+static void
+do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
+ bool success;
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ success = cache_bin_dalloc_easy(bin, &ptrs[i]);
+ expect_true(success, "");
+ }
+
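+	/* Flushing yields the most recently freed pointers first (LIFO). */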
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
+ cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
+ for (cache_bin_sz_t i = 0; i < nflush; i++) {
+ expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
+ }
+ cache_bin_finish_flush(bin, info, &arr, nflush);
+
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
+ "");
+ while (cache_bin_ncached_get_local(bin, info) > 0) {
+ cache_bin_alloc(bin, &success);
+ }
+}
+
+static void
+do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, size_t batch) {
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
+ cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ arr.ptr[i] = &ptrs[i];
+ }
+ cache_bin_finish_fill(bin, info, &arr, nfill);
+ assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
+ cache_bin_low_water_set(bin);
+
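+	/* A batch allocation is expected to return min(batch, ncached). */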
+ void **out = malloc((batch + 1) * sizeof(void *));
+ size_t n = cache_bin_alloc_batch(bin, batch, out);
+ assert_true(n == ((size_t)nfill < batch ? (size_t)nfill : batch), "");
+ for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
+ expect_ptr_eq(out[i], &ptrs[i], "");
+ }
+ expect_true(cache_bin_low_water_get(bin, info) == nfill -
+ (cache_bin_sz_t)n, "");
+ while (cache_bin_ncached_get_local(bin, info) > 0) {
+ bool success;
+ cache_bin_alloc(bin, &success);
+ }
+ free(out);
+}
+
+static void
+test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) {
+ size_t size;
+ size_t alignment;
+ cache_bin_info_compute_alloc(info, 1, &size, &alignment);
+ void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
+ assert_ptr_not_null(mem, "Unexpected mallocx failure");
+
+ size_t cur_offset = 0;
+ cache_bin_preincrement(info, 1, mem, &cur_offset);
+ cache_bin_init(bin, info, mem, &cur_offset);
+ cache_bin_postincrement(info, 1, mem, &cur_offset);
+ assert_zu_eq(cur_offset, size, "Should use all requested memory");
+}
+
+TEST_BEGIN(test_cache_bin) {
+ const int ncached_max = 100;
+ bool success;
+ void *ptr;
+
+ cache_bin_info_t info;
+ cache_bin_info_init(&info, ncached_max);
+ cache_bin_t bin;
+ test_bin_init(&bin, &info);
+
+ /* Initialize to empty; should then have 0 elements. */
+ expect_d_eq(ncached_max, cache_bin_info_ncached_max(&info), "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
+ expect_true(cache_bin_low_water_get(&bin, &info) == 0, "");
+
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_false(success, "Shouldn't successfully allocate when empty");
+ expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
+
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_false(success, "Shouldn't successfully allocate when empty");
+ expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
+
+ /*
+ * We allocate one more item than ncached_max, so we can test cache bin
+ * exhaustion.
+ */
+ void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
+ assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == i, "");
+ success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ expect_true(success,
+ "Should be able to dalloc into a non-full cache bin.");
+ expect_true(cache_bin_low_water_get(&bin, &info) == 0,
+ "Pushes and pops shouldn't change low water of zero.");
+ }
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
+ "");
+ success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
+ expect_false(success, "Shouldn't be able to dalloc into a full bin.");
+
+ cache_bin_low_water_set(&bin);
+
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i, "");
+ /*
+ * This should fail -- the easy variant can't change the low
+ * water mark.
+ */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i, "");
+
+ /* This should succeed, though. */
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
+ "Alloc should pop in stack order");
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i - 1, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i - 1, "");
+ }
+ /* Now we're empty -- all alloc attempts should fail. */
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+
+ for (cache_bin_sz_t i = 0; i < ncached_max / 2; i++) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ }
+ cache_bin_low_water_set(&bin);
+
+ for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ }
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
+ "");
+ for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
+ /*
+ * Size is bigger than low water -- the reduced version should
+ * succeed.
+ */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, &ptrs[i], "");
+ }
+ /* But now, we've hit low-water. */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_false(success, "");
+ expect_ptr_null(ptr, "");
+
+ /* We're going to test filling -- we must be empty to start. */
+ while (cache_bin_ncached_get_local(&bin, &info)) {
+ cache_bin_alloc(&bin, &success);
+ expect_true(success, "");
+ }
+
+ /* Test fill. */
+ /* Try to fill all, succeed fully. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, ncached_max);
+ /* Try to fill all, succeed partially. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max,
+ ncached_max / 2);
+ /* Try to fill all, fail completely. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, 0);
+
+ /* Try to fill some, succeed fully. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
+ ncached_max / 2);
+ /* Try to fill some, succeed partially. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
+ ncached_max / 4);
+ /* Try to fill some, fail completely. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2, 0);
+
+ do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max);
+ do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
+ do_flush_test(&bin, &info, ptrs, ncached_max, 0);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, 0);
+
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max * 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
+ ncached_max / 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
+ ncached_max / 4);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 0);
+
+ free(ptrs);
+}
+TEST_END
+
+static void
+do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Bin not empty");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
+ "Bin not empty");
+ expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
+
+ bool ret;
+ /* Fill */
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
+ expect_true(ret, "Unexpected fill failure");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
+ "Wrong cached count");
+
+ /* Stash */
+ for (cache_bin_sz_t i = 0; i < nstash; i++) {
+ ret = cache_bin_stash(bin, &ptrs[i + nfill]);
+ expect_true(ret, "Unexpected stash failure");
+ }
+ expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
+ "Wrong stashed count");
+
+ if (nfill + nstash == info->ncached_max) {
+ ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
+ expect_false(ret, "Should not dalloc into a full bin");
+ ret = cache_bin_stash(bin, &ptrs[0]);
+ expect_false(ret, "Should not stash into a full bin");
+ }
+
+ /* Alloc filled ones */
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ void *ptr = cache_bin_alloc(bin, &ret);
+ expect_true(ret, "Unexpected alloc failure");
+ /* Verify it's not from the stashed range. */
+ expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
+ "Should not alloc stashed ptrs");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Wrong cached count");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
+ "Wrong stashed count");
+
+ cache_bin_alloc(bin, &ret);
+ expect_false(ret, "Should not alloc stashed");
+
+ /* Clear stashed ones */
+ cache_bin_finish_flush_stashed(bin, info);
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Wrong cached count");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
+ "Wrong stashed count");
+
+ cache_bin_alloc(bin, &ret);
+ expect_false(ret, "Should not alloc from empty bin");
+}
+
+TEST_BEGIN(test_cache_bin_stash) {
+ const int ncached_max = 100;
+
+ cache_bin_t bin;
+ cache_bin_info_t info;
+ cache_bin_info_init(&info, ncached_max);
+ test_bin_init(&bin, &info);
+
+ /*
+ * The content of this array is not accessed; instead the interior
+ * addresses are used to insert / stash into the bins as test pointers.
+ */
+ void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
+ assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
+ bool ret;
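+	/* Alternate between regular dalloc (even i) and stash (odd i). */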
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_ncached_get_local(&bin, &info) ==
+ (i / 2 + i % 2), "Wrong ncached value");
+ expect_true(cache_bin_nstashed_get_local(&bin, &info) == i / 2,
+ "Wrong nstashed value");
+ if (i % 2 == 0) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ } else {
+ ret = cache_bin_stash(&bin, &ptrs[i]);
+ expect_true(ret, "Should be able to stash into a "
+ "non-full cache bin");
+ }
+ }
+ ret = cache_bin_dalloc_easy(&bin, &ptrs[0]);
+ expect_false(ret, "Should not dalloc into a full cache bin");
+ ret = cache_bin_stash(&bin, &ptrs[0]);
+ expect_false(ret, "Should not stash into a full cache bin");
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ void *ptr = cache_bin_alloc(&bin, &ret);
+ if (i < ncached_max / 2) {
+ expect_true(ret, "Should be able to alloc");
+ uintptr_t diff = ((uintptr_t)ptr - (uintptr_t)&ptrs[0])
+ / sizeof(void *);
+			expect_true(diff % 2 == 0,
+			    "Should only alloc the non-stashed pointers");
+ } else {
+ expect_false(ret, "Should not alloc stashed");
+ expect_true(cache_bin_nstashed_get_local(&bin, &info) ==
+ ncached_max / 2, "Wrong nstashed value");
+ }
+ }
+
+ test_bin_init(&bin, &info);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max, 0);
+ do_flush_stashed_test(&bin, &info, ptrs, 0, ncached_max);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 2);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 4);
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_cache_bin,
+ test_cache_bin_stash);
+}
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
new file mode 100644
index 0000000..36142ac
--- /dev/null
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -0,0 +1,211 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_new_delete) {
+ tsd_t *tsd;
+ ckh_t ckh;
+
+ tsd = tsd_fetch();
+
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+
+ expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_count_insert_search_remove) {
+ tsd_t *tsd;
+ ckh_t ckh;
+ const char *strs[] = {
+ "a string",
+ "A string",
+ "a string.",
+ "A string."
+ };
+ const char *missing = "A string not in the hash table.";
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ expect_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu", ZU(0),
+ ckh_count(&ckh));
+
+ /* Insert. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ ckh_insert(tsd, &ckh, strs[i], strs[i]);
+ expect_zu_eq(ckh_count(&ckh), i+1,
+ "ckh_count() should return %zu, but it returned %zu", i+1,
+ ckh_count(&ckh));
+ }
+
+ /* Search. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ expect_false(ckh_search(&ckh, strs[i], kp, vp),
+ "Unexpected ckh_search() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ }
+ expect_true(ckh_search(&ckh, missing, NULL, NULL),
+ "Unexpected ckh_search() success");
+
+ /* Remove. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ expect_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
+ "Unexpected ckh_remove() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ expect_zu_eq(ckh_count(&ckh),
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ "ckh_count() should return %zu, but it returned %zu",
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ ckh_count(&ckh));
+ }
+
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_insert_iter_remove) {
+#define NITEMS ZU(1000)
+ tsd_t *tsd;
+ ckh_t ckh;
+ void **p[NITEMS];
+ void *q, *r;
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+
+ for (i = 0; i < NITEMS; i++) {
+ p[i] = mallocx(i+1, 0);
+ expect_ptr_not_null(p[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ size_t j;
+
+ for (j = i; j < NITEMS; j++) {
+ expect_false(ckh_insert(tsd, &ckh, p[j], p[j]),
+ "Unexpected ckh_insert() failure");
+ expect_false(ckh_search(&ckh, p[j], &q, &r),
+ "Unexpected ckh_search() failure");
+ expect_ptr_eq(p[j], q, "Key pointer mismatch");
+ expect_ptr_eq(p[j], r, "Value pointer mismatch");
+ }
+
+ expect_zu_eq(ckh_count(&ckh), NITEMS,
+ "ckh_count() should return %zu, but it returned %zu",
+ NITEMS, ckh_count(&ckh));
+
+ for (j = i + 1; j < NITEMS; j++) {
+ expect_false(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ expect_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() failure");
+ expect_ptr_eq(p[j], q, "Key pointer mismatch");
+ expect_ptr_eq(p[j], r, "Value pointer mismatch");
+ expect_true(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() success");
+ expect_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() success");
+ }
+
+ {
+ bool seen[NITEMS];
+ size_t tabind;
+
+ memset(seen, 0, sizeof(seen));
+
+ for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
+ size_t k;
+
+ expect_ptr_eq(q, r, "Key and val not equal");
+
+ for (k = 0; k < NITEMS; k++) {
+ if (p[k] == q) {
+ expect_false(seen[k],
+ "Item %zu already seen", k);
+ seen[k] = true;
+ break;
+ }
+ }
+ }
+
+ for (j = 0; j < i + 1; j++) {
+ expect_true(seen[j], "Item %zu not seen", j);
+ }
+ for (; j < NITEMS; j++) {
+ expect_false(seen[j], "Item %zu seen", j);
+ }
+ }
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ expect_false(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ expect_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() failure");
+ expect_ptr_eq(p[i], q, "Key pointer mismatch");
+ expect_ptr_eq(p[i], r, "Value pointer mismatch");
+ expect_true(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() success");
+ expect_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() success");
+ dallocx(p[i], 0);
+ }
+
+ expect_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu",
+ ZU(0), ckh_count(&ckh));
+ ckh_delete(tsd, &ckh);
+#undef NITEMS
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_new_delete,
+ test_count_insert_search_remove,
+ test_insert_iter_remove);
+}
diff --git a/deps/jemalloc/test/unit/counter.c b/deps/jemalloc/test/unit/counter.c
new file mode 100644
index 0000000..277baac
--- /dev/null
+++ b/deps/jemalloc/test/unit/counter.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+static const uint64_t interval = 1 << 20;
+
+TEST_BEGIN(test_counter_accum) {
+ uint64_t increment = interval >> 4;
+ unsigned n = interval / increment;
+ uint64_t accum = 0;
+
+ counter_accum_t c;
+ counter_accum_init(&c, interval);
+
+ tsd_t *tsd = tsd_fetch();
+ bool trigger;
+ for (unsigned i = 0; i < n; i++) {
+ trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
+ accum += increment;
+ if (accum < interval) {
+ expect_b_eq(trigger, false, "Should not trigger");
+ } else {
+ expect_b_eq(trigger, true, "Should have triggered");
+ }
+ }
+ expect_b_eq(trigger, true, "Should have triggered");
+}
+TEST_END
+
+void
+expect_counter_value(counter_accum_t *c, uint64_t v) {
+ uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
+ expect_u64_eq(accum, v, "Counter value mismatch");
+}
+
+#define N_THDS (16)
+#define N_ITER_THD (1 << 12)
+#define ITER_INCREMENT (interval >> 4)
+
+static void *
+thd_start(void *varg) {
+ counter_accum_t *c = (counter_accum_t *)varg;
+
+ tsd_t *tsd = tsd_fetch();
+ bool trigger;
+ uintptr_t n_triggered = 0;
+ for (unsigned i = 0; i < N_ITER_THD; i++) {
+ trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT);
+ n_triggered += trigger ? 1 : 0;
+ }
+
+ return (void *)n_triggered;
+}
+
+
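+/*
+ * Expected trigger count for the multi-threaded test below: the threads
+ * accumulate N_THDS * N_ITER_THD * ITER_INCREMENT bytes in total, i.e.
+ * 16 * 4096 * (interval / 16) = 4096 * interval, so the test expects the
+ * shared counter to fire exactly 4096 times, however the increments
+ * interleave.
+ */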
+TEST_BEGIN(test_counter_mt) {
+ counter_accum_t shared_c;
+ counter_accum_init(&shared_c, interval);
+
+ thd_t thds[N_THDS];
+ unsigned i;
+ for (i = 0; i < N_THDS; i++) {
+ thd_create(&thds[i], thd_start, (void *)&shared_c);
+ }
+
+ uint64_t sum = 0;
+ for (i = 0; i < N_THDS; i++) {
+ void *ret;
+ thd_join(thds[i], &ret);
+ sum += (uintptr_t)ret;
+ }
+ expect_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
+ "Incorrect number of triggers");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_counter_accum,
+ test_counter_mt);
+}
diff --git a/deps/jemalloc/test/unit/decay.c b/deps/jemalloc/test/unit/decay.c
new file mode 100644
index 0000000..bdb6d0a
--- /dev/null
+++ b/deps/jemalloc/test/unit/decay.c
@@ -0,0 +1,283 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/decay.h"
+
+TEST_BEGIN(test_decay_init) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ ssize_t decay_ms = 1000;
+ assert_true(decay_ms_valid(decay_ms), "");
+
+ expect_false(decay_init(&decay, &curtime, decay_ms),
+ "Failed to initialize decay");
+ expect_zd_eq(decay_ms_read(&decay), decay_ms,
+ "Decay_ms was initialized incorrectly");
+ expect_u64_ne(decay_epoch_duration_ns(&decay), 0,
+ "Epoch duration was initialized incorrectly");
+}
+TEST_END
+
+TEST_BEGIN(test_decay_ms_valid) {
+ expect_false(decay_ms_valid(-7),
+ "Misclassified negative decay as valid");
+ expect_true(decay_ms_valid(-1),
+ "Misclassified -1 (never decay) as invalid decay");
+ expect_true(decay_ms_valid(8943),
+ "Misclassified valid decay");
+ if (SSIZE_MAX > NSTIME_SEC_MAX) {
+ expect_false(
+ decay_ms_valid((ssize_t)(NSTIME_SEC_MAX * KQU(1000) + 39)),
+ "Misclassified too large decay");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_decay_npages_purge_in) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ nstime_t decay_nstime;
+ nstime_init(&decay_nstime, decay_ms * 1000 * 1000);
+ expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
+ "Failed to initialize decay");
+
+ size_t new_pages = 100;
+
+ nstime_t time;
+ nstime_copy(&time, &decay_nstime);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
+ new_pages, "Not all pages are expected to decay in decay_ms");
+
+ nstime_init(&time, 0);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages), 0,
+ "More than zero pages are expected to instantly decay");
+
+ nstime_copy(&time, &decay_nstime);
+ nstime_idivide(&time, 2);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
+ new_pages / 2, "Not half of pages decay in half the decay period");
+}
+TEST_END
+
+TEST_BEGIN(test_decay_maybe_advance_epoch) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ expect_false(err, "");
+
+ bool advanced;
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_false(advanced, "Epoch advanced while time didn't");
+
+ nstime_t interval;
+ nstime_init(&interval, decay_epoch_duration_ns(&decay));
+
+ nstime_add(&curtime, &interval);
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_false(advanced, "Epoch advanced after first interval");
+
+ nstime_add(&curtime, &interval);
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_true(advanced, "Epoch didn't advance after two intervals");
+}
+TEST_END
+
+TEST_BEGIN(test_decay_empty) {
+ /* If we never have any decaying pages, npages_limit should be 0. */
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
+
+ uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5;
+ int nepochs = 0;
+ for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) {
+ size_t dirty_pages = 0;
+ nstime_init(&curtime, i * time_between_calls);
+ bool epoch_advanced = decay_maybe_advance_epoch(&decay,
+ &curtime, dirty_pages);
+ if (epoch_advanced) {
+ nepochs++;
+ expect_zu_eq(decay_npages_limit_get(&decay), 0,
+ "Unexpectedly increased npages_limit");
+ }
+ }
+ expect_d_gt(nepochs, 0, "Epochs never advanced");
+}
+TEST_END
+
+/*
+ * Verify that npages_limit correctly decays as the time goes.
+ *
+ * During first 'nepoch_init' epochs, add new dirty pages.
+ * After that, let them decay and verify npages_limit decreases.
+ * Then proceed with another 'nepoch_init' epochs and check that
+ * all dirty pages are flushed out of backlog, bringing npages_limit
+ * down to zero.
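+ *
+ * Concretely, with the constants below: each of the first nepoch_init = 10
+ * epochs adds dirty_pages_per_epoch = 1000 pages; after that, npages_limit
+ * must be nonzero, must shrink on every epoch advance, and may reach zero
+ * only once a full decay_ms = 1000 ms passes with no new dirty pages.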
+ */
+TEST_BEGIN(test_decay) {
+ const uint64_t nepoch_init = 10;
+
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
+
+ expect_zu_eq(decay_npages_limit_get(&decay), 0,
+ "Empty decay returned nonzero npages_limit");
+
+ nstime_t epochtime;
+ nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
+
+ const size_t dirty_pages_per_epoch = 1000;
+ size_t dirty_pages = 0;
+ uint64_t epoch_ns = decay_epoch_duration_ns(&decay);
+ bool epoch_advanced = false;
+
+ /* Populate backlog with some dirty pages */
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ dirty_pages += dirty_pages_per_epoch;
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ }
+ expect_true(epoch_advanced, "Epoch never advanced");
+
+ size_t npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal "
+ "to zero after dirty pages have been added");
+
+ /* Keep dirty pages unchanged and verify that npages_limit decreases */
+ for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) {
+ nstime_add(&curtime, &epochtime);
+ epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ if (epoch_advanced) {
+ size_t npages_limit_new = decay_npages_limit_get(&decay);
+ expect_zu_lt(npages_limit_new, npages_limit,
+			    "npages_limit failed to decay");
+
+ npages_limit = npages_limit_new;
+ }
+ }
+
+ expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier "
+ "than decay_ms since last dirty page was added");
+
+ /* Completely push all dirty pages out of the backlog */
+ epoch_advanced = false;
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ }
+ expect_true(epoch_advanced, "Epoch never advanced");
+
+ npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after "
+ "decay_ms since last bump in dirty pages");
+}
+TEST_END
+
+TEST_BEGIN(test_decay_ns_until_purge) {
+ const uint64_t nepoch_init = 10;
+
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
+
+ nstime_t epochtime;
+ nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
+
+ uint64_t ns_until_purge_empty = decay_ns_until_purge(&decay, 0, 0);
+ expect_u64_eq(ns_until_purge_empty, DECAY_UNBOUNDED_TIME_TO_PURGE,
+ "Failed to return unbounded wait time for zero threshold");
+
+ const size_t dirty_pages_per_epoch = 1000;
+ size_t dirty_pages = 0;
+ bool epoch_advanced = false;
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ dirty_pages += dirty_pages_per_epoch;
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ }
+ expect_true(epoch_advanced, "Epoch never advanced");
+
+ uint64_t ns_until_purge_all = decay_ns_until_purge(&decay,
+ dirty_pages, dirty_pages);
+ expect_u64_ge(ns_until_purge_all, decay_ns,
+ "Incorrectly calculated time to purge all pages");
+
+ uint64_t ns_until_purge_none = decay_ns_until_purge(&decay,
+ dirty_pages, 0);
+ expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2,
+ "Incorrectly calculated time to purge 0 pages");
+
+ uint64_t npages_threshold = dirty_pages / 2;
+ uint64_t ns_until_purge_half = decay_ns_until_purge(&decay,
+ dirty_pages, npages_threshold);
+
+ nstime_t waittime;
+ nstime_init(&waittime, ns_until_purge_half);
+ nstime_add(&curtime, &waittime);
+
+ decay_maybe_advance_epoch(&decay, &curtime, dirty_pages);
+ size_t npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_lt(npages_limit, dirty_pages,
+ "npages_limit failed to decrease after waiting");
+ size_t expected = dirty_pages - npages_limit;
+ int deviation = abs((int)expected - (int)(npages_threshold));
+ expect_d_lt(deviation, (int)(npages_threshold / 2),
+ "After waiting, number of pages is out of the expected interval "
+ "[0.5 * npages_threshold .. 1.5 * npages_threshold]");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_decay_init,
+ test_decay_ms_valid,
+ test_decay_npages_purge_in,
+ test_decay_maybe_advance_epoch,
+ test_decay_empty,
+ test_decay,
+ test_decay_ns_until_purge);
+}
diff --git a/deps/jemalloc/test/unit/div.c b/deps/jemalloc/test/unit/div.c
new file mode 100644
index 0000000..29aea66
--- /dev/null
+++ b/deps/jemalloc/test/unit/div.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/div.h"
+
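+/*
+ * div_init() precomputes per-divisor state (roughly, a magic multiplier) so
+ * that div_compute() can divide by a fixed divisor without a hardware divide
+ * instruction; this test checks that the result is exact for a large range
+ * of multiples of each divisor.
+ */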
+TEST_BEGIN(test_div_exhaustive) {
+ for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) {
+ div_info_t div_info;
+ div_init(&div_info, divisor);
+ size_t max = 1000 * divisor;
+ if (max < 1000 * 1000) {
+ max = 1000 * 1000;
+ }
+		for (size_t dividend = 0; dividend < max;
+ dividend += divisor) {
+ size_t quotient = div_compute(
+ &div_info, dividend);
+ expect_zu_eq(dividend, quotient * divisor,
+ "With divisor = %zu, dividend = %zu, "
+ "got quotient %zu", divisor, dividend, quotient);
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_div_exhaustive);
+}
diff --git a/deps/jemalloc/test/unit/double_free.c b/deps/jemalloc/test/unit/double_free.c
new file mode 100644
index 0000000..12122c1
--- /dev/null
+++ b/deps/jemalloc/test/unit/double_free.c
@@ -0,0 +1,77 @@
+#include "test/jemalloc_test.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
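+/*
+ * The pre/post helpers swap in fake_abort() via safety_check_set_abort(), so
+ * a detected double free records a flag instead of aborting the test
+ * process; post then verifies that the flag fired and restores the default
+ * abort handler.
+ */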
+void
+test_large_double_free_pre(void) {
+ safety_check_set_abort(&fake_abort);
+ fake_abort_called = false;
+}
+
+void
+test_large_double_free_post(void) {
+ expect_b_eq(fake_abort_called, true, "Double-free check didn't fire.");
+ safety_check_set_abort(NULL);
+}
+
+TEST_BEGIN(test_large_double_free_tcache) {
+ test_skip_if(!config_opt_safety_checks);
+ /*
+ * Skip debug builds, since too many assertions will be triggered with
+ * double-free before hitting the one we are interested in.
+ */
+ test_skip_if(config_debug);
+
+ test_large_double_free_pre();
+ char *ptr = malloc(SC_LARGE_MINCLASS);
+ bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
+ free(ptr);
+ if (!guarded) {
+ free(ptr);
+ } else {
+ /*
+ * Skip because guarded extents may unguard immediately on
+ * deallocation, in which case the second free will crash before
+ * reaching the intended safety check.
+ */
+ fake_abort_called = true;
+ }
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+ test_large_double_free_post();
+}
+TEST_END
+
+TEST_BEGIN(test_large_double_free_no_tcache) {
+ test_skip_if(!config_opt_safety_checks);
+ test_skip_if(config_debug);
+
+ test_large_double_free_pre();
+ char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ if (!guarded) {
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ } else {
+ /*
+ * Skip because guarded extents may unguard immediately on
+ * deallocation, in which case the second free will crash before
+ * reaching the intended safety check.
+ */
+ fake_abort_called = true;
+ }
+ test_large_double_free_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_large_double_free_no_tcache,
+ test_large_double_free_tcache);
+}
diff --git a/deps/jemalloc/test/unit/double_free.h b/deps/jemalloc/test/unit/double_free.h
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/deps/jemalloc/test/unit/double_free.h
@@ -0,0 +1 @@
+
diff --git a/deps/jemalloc/test/unit/edata_cache.c b/deps/jemalloc/test/unit/edata_cache.c
new file mode 100644
index 0000000..af1110a
--- /dev/null
+++ b/deps/jemalloc/test/unit/edata_cache.c
@@ -0,0 +1,226 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/edata_cache.h"
+
+static void
+test_edata_cache_init(edata_cache_t *edata_cache) {
+ base_t *base = base_new(TSDN_NULL, /* ind */ 1,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+ bool err = edata_cache_init(edata_cache, base);
+ assert_false(err, "");
+}
+
+static void
+test_edata_cache_destroy(edata_cache_t *edata_cache) {
+ base_delete(TSDN_NULL, edata_cache->base);
+}
+
+TEST_BEGIN(test_edata_cache) {
+ edata_cache_t ec;
+ test_edata_cache_init(&ec);
+
+ /* Get one */
+ edata_t *ed1 = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_not_null(ed1, "");
+
+ /* Cache should be empty */
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* Get another */
+ edata_t *ed2 = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_not_null(ed2, "");
+
+ /* Still empty */
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* Put one back, and the cache should now have one item */
+ edata_cache_put(TSDN_NULL, &ec, ed1);
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 1, "");
+
+ /* Reallocating should reuse the item, and leave an empty cache. */
+ edata_t *ed1_again = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_eq(ed1, ed1_again, "");
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+static size_t
+ecf_count(edata_cache_fast_t *ecf) {
+ size_t count = 0;
+ edata_t *cur;
+ ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
+ count++;
+ }
+ return count;
+}
+
+TEST_BEGIN(test_edata_cache_fast_simple) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(ed1, "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(ed2, "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
+ expect_zu_eq(ecf_count(&ecf), 1, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
+ expect_zu_eq(ecf_count(&ecf), 2, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* LIFO ordering. */
+ expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+ expect_zu_eq(ecf_count(&ecf), 1, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
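+/*
+ * The fill test below relies on the batching behavior it asserts: when the
+ * fast cache is empty, a get attempts to pull up to EDATA_CACHE_FAST_FILL
+ * edatas from the shared fallback cache in one go, and only allocates fresh
+ * metadata from the base once the fallback is exhausted too.
+ */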
+TEST_BEGIN(test_edata_cache_fill) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];
+
+ /*
+ * If the fallback cache can't satisfy the request, we shouldn't do
+ * extra allocations until compelled to. Put half the fill goal in the
+ * fallback.
+ */
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ allocs[i] = edata_cache_get(TSDN_NULL, &ec);
+ }
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ edata_cache_put(TSDN_NULL, &ec, allocs[i]);
+ }
+ expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
+ "Should have grabbed all edatas available but no more.");
+
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+
+ /* When forced, we should alloc from the base. */
+ edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(edata, "");
+ expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Allocated more than necessary");
+
+ /*
+ * We should correctly fill in the common case where the fallback isn't
+ * exhausted, too.
+ */
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
+ allocs[i] = edata_cache_get(TSDN_NULL, &ec);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
+ edata_cache_put(TSDN_NULL, &ec, allocs[i]);
+ }
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+TEST_BEGIN(test_edata_cache_disable) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
+ edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
+ expect_ptr_not_null(edata, "");
+ edata_cache_fast_put(TSDN_NULL, &ecf, edata);
+ }
+
+ expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ edata_cache_fast_disable(TSDN_NULL, &ecf);
+
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
+
+ edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Disabled ecf should forward on get");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, edata);
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Disabled ecf should forward on put");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_edata_cache,
+ test_edata_cache_fast_simple,
+ test_edata_cache_fill,
+ test_edata_cache_disable);
+}
diff --git a/deps/jemalloc/test/unit/emitter.c b/deps/jemalloc/test/unit/emitter.c
new file mode 100644
index 0000000..ef8f9ff
--- /dev/null
+++ b/deps/jemalloc/test/unit/emitter.c
@@ -0,0 +1,533 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/internal/emitter.h"
+
+/*
+ * This is so useful for debugging and feature work, we'll leave printing
+ * functionality committed but disabled by default.
+ */
+/* Print the text as it will appear. */
+static bool print_raw = false;
+/* Print the text escaped, so it can be copied back into the test case. */
+static bool print_escaped = false;
+
+typedef struct buf_descriptor_s buf_descriptor_t;
+struct buf_descriptor_s {
+ char *buf;
+ size_t len;
+ bool mid_quote;
+};
+
+/*
+ * Forwards all writes to the passed-in buf_v (which should be cast from a
+ * buf_descriptor_t *).
+ */
+static void
+forwarding_cb(void *buf_descriptor_v, const char *str) {
+ buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v;
+
+ if (print_raw) {
+ malloc_printf("%s", str);
+ }
+ if (print_escaped) {
+ const char *it = str;
+ while (*it != '\0') {
+ if (!buf_descriptor->mid_quote) {
+ malloc_printf("\"");
+ buf_descriptor->mid_quote = true;
+ }
+ switch (*it) {
+ case '\\':
+ malloc_printf("\\");
+ break;
+ case '\"':
+ malloc_printf("\\\"");
+ break;
+ case '\t':
+ malloc_printf("\\t");
+ break;
+ case '\n':
+ malloc_printf("\\n\"\n");
+ buf_descriptor->mid_quote = false;
+ break;
+ default:
+ malloc_printf("%c", *it);
+ }
+ it++;
+ }
+ }
+
+ size_t written = malloc_snprintf(buf_descriptor->buf,
+ buf_descriptor->len, "%s", str);
+ expect_zu_eq(written, strlen(str), "Buffer overflow!");
+ buf_descriptor->buf += written;
+ buf_descriptor->len -= written;
+ expect_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
+}
+
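+/*
+ * Runs a single emit function under all three output modes (pretty JSON,
+ * compact JSON, and the human-readable table) and compares each result
+ * against the corresponding expected string.
+ */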
+static void
+expect_emit_output(void (*emit_fn)(emitter_t *),
+ const char *expected_json_output,
+ const char *expected_json_compact_output,
+ const char *expected_table_output) {
+ emitter_t emitter;
+ char buf[MALLOC_PRINTF_BUFSIZE];
+ buf_descriptor_t buf_descriptor;
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_json, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ expect_str_eq(expected_json_output, buf, "json output failure");
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ expect_str_eq(expected_json_compact_output, buf,
+ "compact json output failure");
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_table, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ expect_str_eq(expected_table_output, buf, "table output failure");
+}
+
+static void
+emit_dict(emitter_t *emitter) {
+ bool b_false = false;
+ bool b_true = true;
+ int i_123 = 123;
+ const char *str = "a string";
+
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "foo", "This is the foo table:");
+ emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false);
+ emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true);
+ emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123,
+ "note_key1", emitter_type_string, &str);
+ emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str,
+ "note_key2", emitter_type_bool, &b_false);
+ emitter_dict_end(emitter);
+ emitter_end(emitter);
+}
+
+static const char *dict_json =
+"{\n"
+"\t\"foo\": {\n"
+"\t\t\"abc\": false,\n"
+"\t\t\"def\": true,\n"
+"\t\t\"ghi\": 123,\n"
+"\t\t\"jkl\": \"a string\"\n"
+"\t}\n"
+"}\n";
+static const char *dict_json_compact =
+"{"
+ "\"foo\":{"
+ "\"abc\":false,"
+ "\"def\":true,"
+ "\"ghi\":123,"
+ "\"jkl\":\"a string\""
+ "}"
+"}";
+static const char *dict_table =
+"This is the foo table:\n"
+" ABC: false\n"
+" DEF: true\n"
+" GHI: 123 (note_key1: \"a string\")\n"
+" JKL: \"a string\" (note_key2: false)\n";
+
+static void
+emit_table_printf(emitter_t *emitter) {
+ emitter_begin(emitter);
+ emitter_table_printf(emitter, "Table note 1\n");
+ emitter_table_printf(emitter, "Table note 2 %s\n",
+ "with format string");
+ emitter_end(emitter);
+}
+
+static const char *table_printf_json =
+"{\n"
+"}\n";
+static const char *table_printf_json_compact = "{}";
+static const char *table_printf_table =
+"Table note 1\n"
+"Table note 2 with format string\n";
+
+static void emit_nested_dict(emitter_t *emitter) {
+ int val = 123;
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "json1", "Dict 1");
+ emitter_dict_begin(emitter, "json2", "Dict 2");
+ emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close 2 */
+ emitter_dict_begin(emitter, "json3", "Dict 3");
+ emitter_dict_end(emitter); /* Close 3 */
+ emitter_dict_end(emitter); /* Close 1 */
+ emitter_dict_begin(emitter, "json4", "Dict 4");
+ emitter_kv(emitter, "primitive", "Another primitive",
+ emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close 4 */
+ emitter_end(emitter);
+}
+
+static const char *nested_dict_json =
+"{\n"
+"\t\"json1\": {\n"
+"\t\t\"json2\": {\n"
+"\t\t\t\"primitive\": 123\n"
+"\t\t},\n"
+"\t\t\"json3\": {\n"
+"\t\t}\n"
+"\t},\n"
+"\t\"json4\": {\n"
+"\t\t\"primitive\": 123\n"
+"\t}\n"
+"}\n";
+static const char *nested_dict_json_compact =
+"{"
+ "\"json1\":{"
+ "\"json2\":{"
+ "\"primitive\":123"
+ "},"
+ "\"json3\":{"
+ "}"
+ "},"
+ "\"json4\":{"
+ "\"primitive\":123"
+ "}"
+"}";
+static const char *nested_dict_table =
+"Dict 1\n"
+" Dict 2\n"
+" A primitive: 123\n"
+" Dict 3\n"
+"Dict 4\n"
+" Another primitive: 123\n";
+
+static void
+emit_types(emitter_t *emitter) {
+ bool b = false;
+ int i = -123;
+ unsigned u = 123;
+ ssize_t zd = -456;
+ size_t zu = 456;
+ const char *str = "string";
+ uint32_t u32 = 789;
+ uint64_t u64 = 10000000000ULL;
+
+ emitter_begin(emitter);
+ emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b);
+ emitter_kv(emitter, "k2", "K2", emitter_type_int, &i);
+ emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u);
+ emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd);
+ emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu);
+ emitter_kv(emitter, "k6", "K6", emitter_type_string, &str);
+ emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32);
+ emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64);
+ /*
+ * We don't test the title type, since it's only used for tables. It's
+ * tested in the emitter_table_row tests.
+ */
+ emitter_end(emitter);
+}
+
+static const char *types_json =
+"{\n"
+"\t\"k1\": false,\n"
+"\t\"k2\": -123,\n"
+"\t\"k3\": 123,\n"
+"\t\"k4\": -456,\n"
+"\t\"k5\": 456,\n"
+"\t\"k6\": \"string\",\n"
+"\t\"k7\": 789,\n"
+"\t\"k8\": 10000000000\n"
+"}\n";
+static const char *types_json_compact =
+"{"
+ "\"k1\":false,"
+ "\"k2\":-123,"
+ "\"k3\":123,"
+ "\"k4\":-456,"
+ "\"k5\":456,"
+ "\"k6\":\"string\","
+ "\"k7\":789,"
+ "\"k8\":10000000000"
+"}";
+static const char *types_table =
+"K1: false\n"
+"K2: -123\n"
+"K3: 123\n"
+"K4: -456\n"
+"K5: 456\n"
+"K6: \"string\"\n"
+"K7: 789\n"
+"K8: 10000000000\n";
+
+static void
+emit_modal(emitter_t *emitter) {
+ int val = 123;
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "j0", "T0");
+ emitter_json_key(emitter, "j1");
+ emitter_json_object_begin(emitter);
+ emitter_kv(emitter, "i1", "I1", emitter_type_int, &val);
+ emitter_json_kv(emitter, "i2", emitter_type_int, &val);
+ emitter_table_kv(emitter, "I3", emitter_type_int, &val);
+ emitter_table_dict_begin(emitter, "T1");
+ emitter_kv(emitter, "i4", "I4", emitter_type_int, &val);
+ emitter_json_object_end(emitter); /* Close j1 */
+ emitter_kv(emitter, "i5", "I5", emitter_type_int, &val);
+ emitter_table_dict_end(emitter); /* Close T1 */
+ emitter_kv(emitter, "i6", "I6", emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close j0 / T0 */
+ emitter_end(emitter);
+}
+
+const char *modal_json =
+"{\n"
+"\t\"j0\": {\n"
+"\t\t\"j1\": {\n"
+"\t\t\t\"i1\": 123,\n"
+"\t\t\t\"i2\": 123,\n"
+"\t\t\t\"i4\": 123\n"
+"\t\t},\n"
+"\t\t\"i5\": 123,\n"
+"\t\t\"i6\": 123\n"
+"\t}\n"
+"}\n";
+const char *modal_json_compact =
+"{"
+ "\"j0\":{"
+ "\"j1\":{"
+ "\"i1\":123,"
+ "\"i2\":123,"
+ "\"i4\":123"
+ "},"
+ "\"i5\":123,"
+ "\"i6\":123"
+ "}"
+"}";
+const char *modal_table =
+"T0\n"
+" I1: 123\n"
+" I3: 123\n"
+" T1\n"
+" I4: 123\n"
+" I5: 123\n"
+" I6: 123\n";
+
+static void
+emit_json_array(emitter_t *emitter) {
+ int ival = 123;
+
+ emitter_begin(emitter);
+ emitter_json_key(emitter, "dict");
+ emitter_json_object_begin(emitter);
+ emitter_json_key(emitter, "arr");
+ emitter_json_array_begin(emitter);
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "foo", emitter_type_int, &ival);
+ emitter_json_object_end(emitter); /* Close arr[0] */
+ /* arr[1] and arr[2] are primitives. */
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "bar", emitter_type_int, &ival);
+ emitter_json_kv(emitter, "baz", emitter_type_int, &ival);
+ emitter_json_object_end(emitter); /* Close arr[3]. */
+ emitter_json_array_end(emitter); /* Close arr. */
+ emitter_json_object_end(emitter); /* Close dict. */
+ emitter_end(emitter);
+}
+
+static const char *json_array_json =
+"{\n"
+"\t\"dict\": {\n"
+"\t\t\"arr\": [\n"
+"\t\t\t{\n"
+"\t\t\t\t\"foo\": 123\n"
+"\t\t\t},\n"
+"\t\t\t123,\n"
+"\t\t\t123,\n"
+"\t\t\t{\n"
+"\t\t\t\t\"bar\": 123,\n"
+"\t\t\t\t\"baz\": 123\n"
+"\t\t\t}\n"
+"\t\t]\n"
+"\t}\n"
+"}\n";
+static const char *json_array_json_compact =
+"{"
+ "\"dict\":{"
+ "\"arr\":["
+ "{"
+ "\"foo\":123"
+ "},"
+ "123,"
+ "123,"
+ "{"
+ "\"bar\":123,"
+ "\"baz\":123"
+ "}"
+ "]"
+ "}"
+"}";
+static const char *json_array_table = "";
+
+static void
+emit_json_nested_array(emitter_t *emitter) {
+ int ival = 123;
+	const char *sval = "foo";
+ emitter_begin(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_array_end(emitter);
+ emitter_json_array_end(emitter);
+ emitter_end(emitter);
+}
+
+static const char *json_nested_array_json =
+"{\n"
+"\t[\n"
+"\t\t[\n"
+"\t\t\t123,\n"
+"\t\t\t\"foo\",\n"
+"\t\t\t123,\n"
+"\t\t\t\"foo\"\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t\t123\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t\t\"foo\",\n"
+"\t\t\t123\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t]\n"
+"\t]\n"
+"}\n";
+static const char *json_nested_array_json_compact =
+"{"
+ "["
+ "["
+ "123,"
+ "\"foo\","
+ "123,"
+ "\"foo\""
+ "],"
+ "["
+ "123"
+ "],"
+ "["
+ "\"foo\","
+ "123"
+ "],"
+ "["
+ "]"
+ "]"
+"}";
+static const char *json_nested_array_table = "";
+
+static void
+emit_table_row(emitter_t *emitter) {
+ emitter_begin(emitter);
+ emitter_row_t row;
+ emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
+ abc.str_val = "ABC title";
+ emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
+ def.str_val = "DEF title";
+ emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
+ ghi.str_val = "GHI";
+
+ emitter_row_init(&row);
+ emitter_col_init(&abc, &row);
+ emitter_col_init(&def, &row);
+ emitter_col_init(&ghi, &row);
+
+ emitter_table_row(emitter, &row);
+
+ abc.type = emitter_type_int;
+ def.type = emitter_type_bool;
+ ghi.type = emitter_type_int;
+
+ abc.int_val = 123;
+ def.bool_val = true;
+ ghi.int_val = 456;
+ emitter_table_row(emitter, &row);
+
+ abc.int_val = 789;
+ def.bool_val = false;
+ ghi.int_val = 1011;
+ emitter_table_row(emitter, &row);
+
+ abc.type = emitter_type_string;
+ abc.str_val = "a string";
+ def.bool_val = false;
+ ghi.type = emitter_type_title;
+ ghi.str_val = "ghi";
+ emitter_table_row(emitter, &row);
+
+ emitter_end(emitter);
+}
+
+static const char *table_row_json =
+"{\n"
+"}\n";
+static const char *table_row_json_compact = "{}";
+static const char *table_row_table =
+"ABC title DEF title GHI\n"
+"123 true 456\n"
+"789 false 1011\n"
+"\"a string\" false ghi\n";
+
+#define GENERATE_TEST(feature) \
+TEST_BEGIN(test_##feature) { \
+ expect_emit_output(emit_##feature, feature##_json, \
+ feature##_json_compact, feature##_table); \
+} \
+TEST_END
+
+GENERATE_TEST(dict)
+GENERATE_TEST(table_printf)
+GENERATE_TEST(nested_dict)
+GENERATE_TEST(types)
+GENERATE_TEST(modal)
+GENERATE_TEST(json_array)
+GENERATE_TEST(json_nested_array)
+GENERATE_TEST(table_row)
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_dict,
+ test_table_printf,
+ test_nested_dict,
+ test_types,
+ test_modal,
+ test_json_array,
+ test_json_nested_array,
+ test_table_row);
+}
diff --git a/deps/jemalloc/test/unit/extent_quantize.c b/deps/jemalloc/test/unit/extent_quantize.c
new file mode 100644
index 0000000..e6bbd53
--- /dev/null
+++ b/deps/jemalloc/test/unit/extent_quantize.c
@@ -0,0 +1,141 @@
+#include "test/jemalloc_test.h"
+
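+/*
+ * sz_psz_quantize_floor()/_ceil() round an extent size down or up to the
+ * nearest page size class (psz).  Sizes that are themselves size classes
+ * should quantize to themselves, which is what the tests below verify for
+ * both small and large classes.
+ */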
+TEST_BEGIN(test_small_extent_size) {
+ unsigned nbins, i;
+ size_t sz, extent_size;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all small size classes, get their extent sizes, and
+ * verify that the quantized size is the same as the extent size.
+ */
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nbins; i++) {
+ mib[2] = i;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib failure");
+ expect_zu_eq(extent_size,
+ sz_psz_quantize_floor(extent_size),
+ "Small extent quantization should be a no-op "
+ "(extent_size=%zu)", extent_size);
+ expect_zu_eq(extent_size,
+ sz_psz_quantize_ceil(extent_size),
+ "Small extent quantization should be a no-op "
+ "(extent_size=%zu)", extent_size);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_extent_size) {
+ bool cache_oblivious;
+ unsigned nlextents, i;
+ size_t sz, extent_size_prev, ceil_prev;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all large size classes, get their extent sizes, and
+ * verify that the quantized size is the same as the extent size.
+ */
+
+ sz = sizeof(bool);
+ expect_d_eq(mallctl("opt.cache_oblivious", (void *)&cache_oblivious,
+ &sz, NULL, 0), 0, "Unexpected mallctl failure");
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nlextents; i++) {
+ size_t lextent_size, extent_size, floor, ceil;
+
+ mib[2] = i;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
+ &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
+ extent_size = cache_oblivious ? lextent_size + PAGE :
+ lextent_size;
+ floor = sz_psz_quantize_floor(extent_size);
+ ceil = sz_psz_quantize_ceil(extent_size);
+
+ expect_zu_eq(extent_size, floor,
+ "Extent quantization should be a no-op for precise size "
+ "(lextent_size=%zu, extent_size=%zu)", lextent_size,
+ extent_size);
+ expect_zu_eq(extent_size, ceil,
+ "Extent quantization should be a no-op for precise size "
+ "(lextent_size=%zu, extent_size=%zu)", lextent_size,
+ extent_size);
+
+ if (i > 0) {
+ expect_zu_eq(extent_size_prev,
+ sz_psz_quantize_floor(extent_size - PAGE),
+ "Floor should be a precise size");
+ if (extent_size_prev < ceil_prev) {
+ expect_zu_eq(ceil_prev, extent_size,
+ "Ceiling should be a precise size "
+ "(extent_size_prev=%zu, ceil_prev=%zu, "
+ "extent_size=%zu)", extent_size_prev,
+ ceil_prev, extent_size);
+ }
+ }
+ if (i + 1 < nlextents) {
+ extent_size_prev = floor;
+ ceil_prev = sz_psz_quantize_ceil(extent_size +
+ PAGE);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_monotonic) {
+#define SZ_MAX ZU(4 * 1024 * 1024)
+ unsigned i;
+ size_t floor_prev, ceil_prev;
+
+ floor_prev = 0;
+ ceil_prev = 0;
+ for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
+ size_t extent_size, floor, ceil;
+
+ extent_size = i << LG_PAGE;
+ floor = sz_psz_quantize_floor(extent_size);
+ ceil = sz_psz_quantize_ceil(extent_size);
+
+ expect_zu_le(floor, extent_size,
+ "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
+ floor, extent_size, ceil);
+ expect_zu_ge(ceil, extent_size,
+ "Ceiling should be >= (floor=%zu, extent_size=%zu, "
+ "ceil=%zu)", floor, extent_size, ceil);
+
+ expect_zu_le(floor_prev, floor, "Floor should be monotonic "
+ "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
+ floor_prev, floor, extent_size, ceil);
+ expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
+ "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
+ floor, extent_size, ceil_prev, ceil);
+
+ floor_prev = floor;
+ ceil_prev = ceil;
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_small_extent_size,
+ test_large_extent_size,
+ test_monotonic);
+}
diff --git a/deps/jemalloc/test/unit/fb.c b/deps/jemalloc/test/unit/fb.c
new file mode 100644
index 0000000..ad72c75
--- /dev/null
+++ b/deps/jemalloc/test/unit/fb.c
@@ -0,0 +1,954 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/fb.h"
+#include "test/nbits.h"
+
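+/*
+ * fb is the flat bitmap: a plain array of fb_group_t words covering nbits
+ * bits, with FB_NGROUPS(nbits) giving the number of words required.  The
+ * NB()/NBITS_TAB macros from test/nbits.h expand each test body over a
+ * table of bit counts.
+ */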
+static void
+do_test_init(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ /* Junk fb's contents. */
+ memset(fb, 99, sz);
+ fb_init(fb, nbits);
+ for (size_t i = 0; i < nbits; i++) {
+ expect_false(fb_get(fb, nbits, i),
+ "bitmap should start empty");
+ }
+ free(fb);
+}
+
+TEST_BEGIN(test_fb_init) {
+#define NB(nbits) \
+ do_test_init(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+do_test_get_set_unset(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+ /* Set the bits divisible by 3. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 3 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ }
+ /* Check them. */
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 3 == 0, fb_get(fb, nbits, i),
+ "Unexpected bit at position %zu", i);
+ }
+ /* Unset those divisible by 5. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 5 == 0) {
+ fb_unset(fb, nbits, i);
+ }
+ }
+ /* Check them. */
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 3 == 0 && i % 5 != 0, fb_get(fb, nbits, i),
+ "Unexpected bit at position %zu", i);
+ }
+ free(fb);
+}
+
+TEST_BEGIN(test_get_set_unset) {
+#define NB(nbits) \
+ do_test_get_set_unset(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static ssize_t
+find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) {
+ for(; i < (ssize_t)nbits && i >= 0; i += (forward ? 1 : -1)) {
+ bool expected_bit = i % 3 == 0 || i % 5 == 0;
+ if (expected_bit == bit) {
+ return i;
+ }
+ }
+ return forward ? (ssize_t)nbits : (ssize_t)-1;
+}
+
+static void
+do_test_search_simple(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+
+ /* We pick multiples of 3 or 5. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 3 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ /* This tests double-setting a little, too. */
+ if (i % 5 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ }
+ for (size_t i = 0; i < nbits; i++) {
+ size_t ffs_compute = find_3_5_compute(i, nbits, true, true);
+ size_t ffs_search = fb_ffs(fb, nbits, i);
+ expect_zu_eq(ffs_compute, ffs_search, "ffs mismatch at %zu", i);
+
+ ssize_t fls_compute = find_3_5_compute(i, nbits, true, false);
+ size_t fls_search = fb_fls(fb, nbits, i);
+ expect_zu_eq(fls_compute, fls_search, "fls mismatch at %zu", i);
+
+ size_t ffu_compute = find_3_5_compute(i, nbits, false, true);
+ size_t ffu_search = fb_ffu(fb, nbits, i);
+ expect_zu_eq(ffu_compute, ffu_search, "ffu mismatch at %zu", i);
+
+ size_t flu_compute = find_3_5_compute(i, nbits, false, false);
+ size_t flu_search = fb_flu(fb, nbits, i);
+ expect_zu_eq(flu_compute, flu_search, "flu mismatch at %zu", i);
+ }
+
+ free(fb);
+}
+
+TEST_BEGIN(test_search_simple) {
+#define NB(nbits) \
+ do_test_search_simple(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
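+/*
+ * In the exhaustive search test, "mostly_empty" has only special_bit set and
+ * "mostly_full" has every bit set except special_bit; this helper checks all
+ * four search primitives (ffs/fls/ffu/flu) from a given position, split by
+ * whether that position falls before, at, or after the special bit.
+ */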
+static void
+expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty,
+ size_t nbits, size_t special_bit, size_t position) {
+ if (position < special_bit) {
+ expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(-1, fb_fls(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(special_bit, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(-1, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ } else if (position == special_bit) {
+ expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position + 1, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position - 1, fb_flu(mostly_empty, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position + 1, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position - 1, fb_fls(mostly_full, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ } else {
+ /* position > special_bit. */
+ expect_zu_eq(nbits, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(nbits, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ }
+}
+
+static void
+do_test_search_exhaustive(size_t nbits) {
+ /* This test is quadratic; let's not get too big. */
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *empty = malloc(sz);
+ fb_init(empty, nbits);
+ fb_group_t *full = malloc(sz);
+ fb_init(full, nbits);
+ fb_set_range(full, nbits, 0, nbits);
+
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(empty, nbits, i);
+ fb_unset(full, nbits, i);
+
+ for (size_t j = 0; j < nbits; j++) {
+ expect_exhaustive_results(full, empty, nbits, i, j);
+ }
+ fb_unset(empty, nbits, i);
+ fb_set(full, nbits, i);
+ }
+
+ free(empty);
+ free(full);
+}
+
+TEST_BEGIN(test_search_exhaustive) {
+#define NB(nbits) \
+ do_test_search_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+TEST_BEGIN(test_range_simple) {
+ /*
+ * Just pick a constant big enough to have nontrivial middle sizes, and
+ * big enough that usages of things like weirdnum (below) near the
+ * beginning fit comfortably into the beginning of the bitmap.
+ */
+ size_t nbits = 64 * 10;
+ size_t ngroups = FB_NGROUPS(nbits);
+ fb_group_t *fb = malloc(sizeof(fb_group_t) * ngroups);
+ fb_init(fb, nbits);
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 2 == 0) {
+ fb_set_range(fb, nbits, i, 1);
+ }
+ }
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 2 == 0, fb_get(fb, nbits, i),
+ "mismatch at position %zu", i);
+ }
+ fb_set_range(fb, nbits, 0, nbits / 2);
+ fb_unset_range(fb, nbits, nbits / 2, nbits / 2);
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i < nbits / 2, fb_get(fb, nbits, i),
+ "mismatch at position %zu", i);
+ }
+
+ static const size_t weirdnum = 7;
+ fb_set_range(fb, nbits, 0, nbits);
+ fb_unset_range(fb, nbits, weirdnum, FB_GROUP_BITS + weirdnum);
+ for (size_t i = 0; i < nbits; i++) {
+		expect_b_eq(weirdnum <= i && i <= 2 * weirdnum + FB_GROUP_BITS - 1,
+ !fb_get(fb, nbits, i), "mismatch at position %zu", i);
+ }
+ free(fb);
+}
+TEST_END
+
+static void
+do_test_empty_full_exhaustive(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *empty = malloc(sz);
+ fb_init(empty, nbits);
+ fb_group_t *full = malloc(sz);
+ fb_init(full, nbits);
+ fb_set_range(full, nbits, 0, nbits);
+
+ expect_true(fb_full(full, nbits), "");
+ expect_false(fb_empty(full, nbits), "");
+ expect_false(fb_full(empty, nbits), "");
+ expect_true(fb_empty(empty, nbits), "");
+
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(empty, nbits, i);
+ fb_unset(full, nbits, i);
+
+ expect_false(fb_empty(empty, nbits), "error at bit %zu", i);
+ if (nbits != 1) {
+ expect_false(fb_full(empty, nbits),
+ "error at bit %zu", i);
+ expect_false(fb_empty(full, nbits),
+ "error at bit %zu", i);
+ } else {
+ expect_true(fb_full(empty, nbits),
+ "error at bit %zu", i);
+ expect_true(fb_empty(full, nbits),
+ "error at bit %zu", i);
+ }
+ expect_false(fb_full(full, nbits), "error at bit %zu", i);
+
+ fb_unset(empty, nbits, i);
+ fb_set(full, nbits, i);
+ }
+
+ free(empty);
+ free(full);
+}
+
+TEST_BEGIN(test_empty_full) {
+#define NB(nbits) \
+ do_test_empty_full_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+/*
+ * This tests both iter_range and the longest range functionality, which is
+ * built closely on top of it.
+ */
+TEST_BEGIN(test_iter_range_simple) {
+ size_t set_limit = 30;
+ size_t nbits = 100;
+ fb_group_t fb[FB_NGROUPS(100)];
+
+ fb_init(fb, nbits);
+
+ /*
+ * Failing to initialize these can lead to build failures with -Wall;
+ * the compiler can't prove that they're set.
+ */
+ size_t begin = (size_t)-1;
+ size_t len = (size_t)-1;
+ bool result;
+
+ /* A set of checks with only the first set_limit bits *set*. */
+ fb_set_range(fb, nbits, 0, set_limit);
+ expect_zu_eq(set_limit, fb_srange_longest(fb, nbits),
+ "Incorrect longest set range");
+ expect_zu_eq(nbits - set_limit, fb_urange_longest(fb, nbits),
+ "Incorrect longest unset range");
+ for (size_t i = 0; i < set_limit; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+ }
+ for (size_t i = set_limit; i < nbits; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
+ }
+
+ /* A set of checks with only the first set_limit bits *unset*. */
+ fb_unset_range(fb, nbits, 0, set_limit);
+ fb_set_range(fb, nbits, set_limit, nbits - set_limit);
+ expect_zu_eq(nbits - set_limit, fb_srange_longest(fb, nbits),
+ "Incorrect longest set range");
+ expect_zu_eq(set_limit, fb_urange_longest(fb, nbits),
+ "Incorrect longest unset range");
+ for (size_t i = 0; i < set_limit; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+		expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
+ }
+ for (size_t i = set_limit; i < nbits; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
+ }
+
+}
+TEST_END
+
+/*
+ * Doing this bit-by-bit is too slow for a real implementation, but for testing
+ * code, it's easy to get right. In the exhaustive tests, we'll compare the
+ * (fast but tricky) real implementation against the (slow but simple) testing
+ * one.
+ */
+static bool
+fb_iter_simple(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len, bool val, bool forward) {
+ ssize_t stride = (forward ? (ssize_t)1 : (ssize_t)-1);
+ ssize_t range_begin = (ssize_t)start;
+ for (; range_begin != (ssize_t)nbits && range_begin != -1;
+ range_begin += stride) {
+ if (fb_get(fb, nbits, range_begin) == val) {
+ ssize_t range_end = range_begin;
+ for (; range_end != (ssize_t)nbits && range_end != -1;
+ range_end += stride) {
+ if (fb_get(fb, nbits, range_end) != val) {
+ break;
+ }
+ }
+ if (forward) {
+ *r_begin = range_begin;
+ *r_len = range_end - range_begin;
+ } else {
+ *r_begin = range_end + 1;
+ *r_len = range_begin - range_end;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Similar, but for finding longest ranges. */
+static size_t
+fb_range_longest_simple(fb_group_t *fb, size_t nbits, bool val) {
+ size_t longest_so_far = 0;
+ for (size_t begin = 0; begin < nbits; begin++) {
+ if (fb_get(fb, nbits, begin) != val) {
+ continue;
+ }
+ size_t end = begin + 1;
+ for (; end < nbits; end++) {
+ if (fb_get(fb, nbits, end) != val) {
+ break;
+ }
+ }
+ if (end - begin > longest_so_far) {
+ longest_so_far = end - begin;
+ }
+ }
+ return longest_so_far;
+}
+
+static void
+expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos,
+ bool val, bool forward) {
+ bool iter_res;
+ size_t iter_begin JEMALLOC_CC_SILENCE_INIT(0);
+ size_t iter_len JEMALLOC_CC_SILENCE_INIT(0);
+ if (val) {
+ if (forward) {
+ iter_res = fb_srange_iter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ } else {
+ iter_res = fb_srange_riter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ }
+ } else {
+ if (forward) {
+ iter_res = fb_urange_iter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ } else {
+ iter_res = fb_urange_riter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ }
+ }
+
+ bool simple_iter_res;
+ /*
+ * These are dead stores, but the compiler can't always figure that out
+ * statically, and warns on the uninitialized variable.
+ */
+ size_t simple_iter_begin = 0;
+ size_t simple_iter_len = 0;
+ simple_iter_res = fb_iter_simple(fb, nbits, pos, &simple_iter_begin,
+ &simple_iter_len, val, forward);
+
+ expect_b_eq(iter_res, simple_iter_res, "Result mismatch at %zu", pos);
+ if (iter_res && simple_iter_res) {
+ assert_zu_eq(iter_begin, simple_iter_begin,
+ "Begin mismatch at %zu", pos);
+ expect_zu_eq(iter_len, simple_iter_len,
+ "Length mismatch at %zu", pos);
+ }
+}
+
+static void
+expect_iter_results(fb_group_t *fb, size_t nbits) {
+ for (size_t i = 0; i < nbits; i++) {
+ expect_iter_results_at(fb, nbits, i, false, false);
+ expect_iter_results_at(fb, nbits, i, false, true);
+ expect_iter_results_at(fb, nbits, i, true, false);
+ expect_iter_results_at(fb, nbits, i, true, true);
+ }
+ expect_zu_eq(fb_range_longest_simple(fb, nbits, true),
+ fb_srange_longest(fb, nbits), "Longest range mismatch");
+ expect_zu_eq(fb_range_longest_simple(fb, nbits, false),
+ fb_urange_longest(fb, nbits), "Longest range mismatch");
+}
+
+static void
+set_pattern_3(fb_group_t *fb, size_t nbits, bool zero_val) {
+ for (size_t i = 0; i < nbits; i++) {
+ if ((i % 6 < 3 && zero_val) || (i % 6 >= 3 && !zero_val)) {
+ fb_set(fb, nbits, i);
+ } else {
+ fb_unset(fb, nbits, i);
+ }
+ }
+}
+
+static void
+do_test_iter_range_exhaustive(size_t nbits) {
+ /* This test is also pretty slow. */
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+
+ set_pattern_3(fb, nbits, /* zero_val */ true);
+ expect_iter_results(fb, nbits);
+
+ set_pattern_3(fb, nbits, /* zero_val */ false);
+ expect_iter_results(fb, nbits);
+
+ fb_set_range(fb, nbits, 0, nbits);
+ fb_unset_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
+ expect_iter_results(fb, nbits);
+
+ fb_unset_range(fb, nbits, 0, nbits);
+ fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1: nbits / 2);
+ expect_iter_results(fb, nbits);
+
+ free(fb);
+}
+
+/*
+ * Like test_iter_range_simple, this tests both iteration and longest-range
+ * computation.
+ */
+TEST_BEGIN(test_iter_range_exhaustive) {
+#define NB(nbits) \
+ do_test_iter_range_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+/*
+ * If all set bits in the bitmap are contiguous, in [set_start, set_end),
+ * returns the number of set bits in [scount_start, scount_end).
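+ * E.g. with set bits in [5, 10), a query over [8, 20) overlaps only [8, 10)
+ * and so yields 2.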
+ */
+static size_t
+scount_contiguous(size_t set_start, size_t set_end, size_t scount_start,
+ size_t scount_end) {
+ /* No overlap. */
+ if (set_end <= scount_start || scount_end <= set_start) {
+ return 0;
+ }
+ /* set range contains scount range */
+ if (set_start <= scount_start && set_end >= scount_end) {
+ return scount_end - scount_start;
+ }
+ /* scount range contains set range. */
+ if (scount_start <= set_start && scount_end >= set_end) {
+ return set_end - set_start;
+ }
+ /* Partial overlap, with set range starting first. */
+ if (set_start < scount_start && set_end < scount_end) {
+ return set_end - scount_start;
+ }
+ /* Partial overlap, with scount range starting first. */
+ if (scount_start < set_start && scount_end < set_end) {
+ return scount_end - set_start;
+ }
+ /*
+ * Trigger an assert failure; the above list should have been
+ * exhaustive.
+ */
+ unreachable();
+}
+
+static size_t
+ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start,
+ size_t ucount_end) {
+ /* No overlap. */
+ if (set_end <= ucount_start || ucount_end <= set_start) {
+ return ucount_end - ucount_start;
+ }
+ /* set range contains ucount range */
+ if (set_start <= ucount_start && set_end >= ucount_end) {
+ return 0;
+ }
+ /* ucount range contains set range. */
+ if (ucount_start <= set_start && ucount_end >= set_end) {
+ return (ucount_end - ucount_start) - (set_end - set_start);
+ }
+ /* Partial overlap, with set range starting first. */
+ if (set_start < ucount_start && set_end < ucount_end) {
+ return ucount_end - set_end;
+ }
+ /* Partial overlap, with ucount range starting first. */
+ if (ucount_start < set_start && ucount_end < set_end) {
+ return set_start - ucount_start;
+ }
+ /*
+ * Trigger an assert failure; the above list should have been
+ * exhaustive.
+ */
+ unreachable();
+}
+
+static void
+expect_count_match_contiguous(fb_group_t *fb, size_t nbits, size_t set_start,
+ size_t set_end) {
+ for (size_t i = 0; i < nbits; i++) {
+ for (size_t j = i + 1; j <= nbits; j++) {
+ size_t cnt = j - i;
+ size_t scount_expected = scount_contiguous(set_start,
+ set_end, i, j);
+ size_t scount_computed = fb_scount(fb, nbits, i, cnt);
+ expect_zu_eq(scount_expected, scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with bits set in [%zu, %zu)",
+ nbits, i, cnt, set_start, set_end);
+
+ size_t ucount_expected = ucount_contiguous(set_start,
+ set_end, i, j);
+ size_t ucount_computed = fb_ucount(fb, nbits, i, cnt);
+ assert_zu_eq(ucount_expected, ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with bits set in [%zu, %zu)",
+ nbits, i, cnt, set_start, set_end);
+ }
+ }
+}
+
+static void
+do_test_count_contiguous(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+
+ fb_init(fb, nbits);
+
+ expect_count_match_contiguous(fb, nbits, 0, 0);
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(fb, nbits, i);
+ expect_count_match_contiguous(fb, nbits, 0, i + 1);
+ }
+
+ for (size_t i = 0; i < nbits; i++) {
+ fb_unset(fb, nbits, i);
+ expect_count_match_contiguous(fb, nbits, i + 1, nbits);
+ }
+
+ free(fb);
+}
+
+TEST_BEGIN(test_count_contiguous_simple) {
+ enum {nbits = 300};
+ fb_group_t fb[FB_NGROUPS(nbits)];
+ fb_init(fb, nbits);
+ /* Just an arbitrary number. */
+ size_t start = 23;
+
+ fb_set_range(fb, nbits, start, 30 - start);
+ expect_count_match_contiguous(fb, nbits, start, 30);
+
+ fb_set_range(fb, nbits, start, 40 - start);
+ expect_count_match_contiguous(fb, nbits, start, 40);
+
+ fb_set_range(fb, nbits, start, 70 - start);
+ expect_count_match_contiguous(fb, nbits, start, 70);
+
+ fb_set_range(fb, nbits, start, 120 - start);
+ expect_count_match_contiguous(fb, nbits, start, 120);
+
+ fb_set_range(fb, nbits, start, 150 - start);
+ expect_count_match_contiguous(fb, nbits, start, 150);
+
+ fb_set_range(fb, nbits, start, 200 - start);
+ expect_count_match_contiguous(fb, nbits, start, 200);
+
+ fb_set_range(fb, nbits, start, 290 - start);
+ expect_count_match_contiguous(fb, nbits, start, 290);
+}
+TEST_END
+
+TEST_BEGIN(test_count_contiguous) {
+#define NB(nbits) \
+ /* This test is *particularly* slow in debug builds. */ \
+ if ((!config_debug && nbits < 300) || nbits < 150) { \
+ do_test_count_contiguous(nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
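+/*
+ * With an alternating pattern (fb_even has bits 0, 2, 4, ... set; fb_odd has
+ * bits 1, 3, 5, ... set), any window of cnt bits contains cnt / 2 set bits,
+ * plus one more when cnt is odd and the window starts on a set bit.  E.g. in
+ * fb_odd, the window of length 3 starting at bit 1 covers {1, 2, 3}, two of
+ * which are set.
+ */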
+static void
+expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd,
+ size_t nbits) {
+ for (size_t i = 0; i < nbits; i++) {
+ for (size_t j = i + 1; j <= nbits; j++) {
+ size_t cnt = j - i;
+ size_t odd_scount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 1);
+ size_t odd_scount_computed = fb_scount(fb_odd, nbits,
+ i, j - i);
+ assert_zu_eq(odd_scount, odd_scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t odd_ucount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 0);
+ size_t odd_ucount_computed = fb_ucount(fb_odd, nbits,
+ i, j - i);
+ assert_zu_eq(odd_ucount, odd_ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t even_scount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 0);
+ size_t even_scount_computed = fb_scount(fb_even, nbits,
+ i, j - i);
+ assert_zu_eq(even_scount, even_scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t even_ucount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 1);
+ size_t even_ucount_computed = fb_ucount(fb_even, nbits,
+ i, j - i);
+ assert_zu_eq(even_ucount, even_ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+ }
+ }
+}
+
+static void
+do_test_count_alternating(size_t nbits) {
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb_even = malloc(sz);
+ fb_group_t *fb_odd = malloc(sz);
+
+ fb_init(fb_even, nbits);
+ fb_init(fb_odd, nbits);
+
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 2 == 0) {
+ fb_set(fb_even, nbits, i);
+ } else {
+ fb_set(fb_odd, nbits, i);
+ }
+ }
+
+ expect_count_match_alternating(fb_even, fb_odd, nbits);
+
+ free(fb_even);
+ free(fb_odd);
+}
+
+TEST_BEGIN(test_count_alternating) {
+#define NB(nbits) \
+ do_test_count_alternating(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
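+/*
+ * Fills two bitmaps with pseudo-random bits, applies fb_op, and then verifies
+ * bit-by-bit that the result matches the scalar op and that the source
+ * bitmaps were left unmodified.
+ */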
+static void
+do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b),
+ void (*fb_op)(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb1 = malloc(sz);
+ fb_group_t *fb2 = malloc(sz);
+ fb_group_t *fb_result = malloc(sz);
+ fb_init(fb1, nbits);
+ fb_init(fb2, nbits);
+ fb_init(fb_result, nbits);
+
+ /* Just two random numbers. */
+ const uint64_t prng_init1 = (uint64_t)0X4E9A9DE6A35691CDULL;
+ const uint64_t prng_init2 = (uint64_t)0X7856E396B063C36EULL;
+
+ uint64_t prng1 = prng_init1;
+ uint64_t prng2 = prng_init2;
+
+ for (size_t i = 0; i < nbits; i++) {
+ bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
+ bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
+
+ if (bit1) {
+ fb_set(fb1, nbits, i);
+ }
+ if (bit2) {
+ fb_set(fb2, nbits, i);
+ }
+
+ if (i % 64 == 0) {
+ prng1 = prng_state_next_u64(prng1);
+ prng2 = prng_state_next_u64(prng2);
+ }
+ }
+
+ fb_op(fb_result, fb1, fb2, nbits);
+
+ /* Reset the prngs to replay them. */
+ prng1 = prng_init1;
+ prng2 = prng_init2;
+
+ for (size_t i = 0; i < nbits; i++) {
+ bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
+ bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
+
+ /* Original bitmaps shouldn't change. */
+ expect_b_eq(bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i);
+ expect_b_eq(bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i);
+
+		/* New one should be the result of the bit op. */
+ expect_b_eq(op(bit1, bit2), fb_get(fb_result, nbits, i),
+ "difference at bit %zu", i);
+
+ /* Update the same way we did last time. */
+ if (i % 64 == 0) {
+ prng1 = prng_state_next_u64(prng1);
+ prng2 = prng_state_next_u64(prng2);
+ }
+ }
+
+ free(fb1);
+ free(fb2);
+ free(fb_result);
+}
+
+static bool
+binary_and(bool a, bool b) {
+ return a & b;
+}
+
+static void
+do_test_bit_and(size_t nbits) {
+ do_test_bit_op(nbits, &binary_and, &fb_bit_and);
+}
+
+TEST_BEGIN(test_bit_and) {
+#define NB(nbits) \
+ do_test_bit_and(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static bool
+binary_or(bool a, bool b) {
+ return a | b;
+}
+
+static void
+do_test_bit_or(size_t nbits) {
+ do_test_bit_op(nbits, &binary_or, &fb_bit_or);
+}
+
+TEST_BEGIN(test_bit_or) {
+#define NB(nbits) \
+ do_test_bit_or(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static bool
+binary_not(bool a, bool b) {
+ (void)b;
+ return !a;
+}
+
+static void
+fb_bit_not_shim(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2,
+ size_t nbits) {
+ (void)src2;
+ fb_bit_not(dst, src1, nbits);
+}
+
+static void
+do_test_bit_not(size_t nbits) {
+ do_test_bit_op(nbits, &binary_not, &fb_bit_not_shim);
+}
+
+TEST_BEGIN(test_bit_not) {
+#define NB(nbits) \
+ do_test_bit_not(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_fb_init,
+ test_get_set_unset,
+ test_search_simple,
+ test_search_exhaustive,
+ test_range_simple,
+ test_empty_full,
+ test_iter_range_simple,
+ test_iter_range_exhaustive,
+ test_count_contiguous_simple,
+ test_count_contiguous,
+ test_count_alternating,
+ test_bit_and,
+ test_bit_or,
+ test_bit_not);
+}
diff --git a/deps/jemalloc/test/unit/fork.c b/deps/jemalloc/test/unit/fork.c
new file mode 100644
index 0000000..4137423
--- /dev/null
+++ b/deps/jemalloc/test/unit/fork.c
@@ -0,0 +1,141 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _WIN32
+#include <sys/wait.h>
+#endif
+
+#ifndef _WIN32
+static void
+wait_for_child_exit(int pid) {
+ int status;
+ while (true) {
+ if (waitpid(pid, &status, 0) == -1) {
+ test_fail("Unexpected waitpid() failure.");
+ }
+ if (WIFSIGNALED(status)) {
+ test_fail("Unexpected child termination due to "
+ "signal %d", WTERMSIG(status));
+ break;
+ }
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) != 0) {
+ test_fail("Unexpected child exit value %d",
+ WEXITSTATUS(status));
+ }
+ break;
+ }
+ }
+}
+#endif
+
+TEST_BEGIN(test_fork) {
+#ifndef _WIN32
+ void *p;
+ pid_t pid;
+
+ /* Set up a manually managed arena for test. */
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ /* Migrate to the new arena. */
+ unsigned old_arena_ind;
+ sz = sizeof(old_arena_ind);
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+
+ pid = fork();
+
+ free(p);
+
+ p = malloc(64);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+ free(p);
+
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ _exit(0);
+ } else {
+ wait_for_child_exit(pid);
+ }
+#else
+ test_skip("fork(2) is irrelevant to Windows");
+#endif
+}
+TEST_END
+
+#ifndef _WIN32
+static void *
+do_fork_thd(void *arg) {
+ malloc(1);
+ int pid = fork();
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ char *args[] = {"true", NULL};
+ execvp(args[0], args);
+ test_fail("Exec failed");
+ } else {
+ /* Parent */
+ wait_for_child_exit(pid);
+ }
+ return NULL;
+}
+#endif
+
+#ifndef _WIN32
+static void
+do_test_fork_multithreaded() {
+ thd_t child;
+ thd_create(&child, do_fork_thd, NULL);
+ do_fork_thd(NULL);
+ thd_join(child, NULL);
+}
+#endif
+
+TEST_BEGIN(test_fork_multithreaded) {
+#ifndef _WIN32
+ /*
+ * We've seen bugs involving hanging on arenas_lock (though the same
+ * class of bugs can happen on any mutex). The bugs are intermittent
+ * though, so we want to run the test multiple times. Since we hold the
+ * arenas lock only early in the process lifetime, we can't just run
+ * this test in a loop (since, after all the arenas are initialized, we
+ * won't acquire arenas_lock any further). We therefore repeat the test
+ * with multiple processes.
+ */
+ for (int i = 0; i < 100; i++) {
+ int pid = fork();
+ if (pid == -1) {
+ /* Error. */
+			test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ do_test_fork_multithreaded();
+ _exit(0);
+ } else {
+ wait_for_child_exit(pid);
+ }
+ }
+#else
+ test_skip("fork(2) is irrelevant to Windows");
+#endif
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_fork,
+ test_fork_multithreaded);
+}
diff --git a/deps/jemalloc/test/unit/fxp.c b/deps/jemalloc/test/unit/fxp.c
new file mode 100644
index 0000000..27f1097
--- /dev/null
+++ b/deps/jemalloc/test/unit/fxp.c
@@ -0,0 +1,394 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/fxp.h"
+
+static double
+fxp2double(fxp_t a) {
+ double intpart = (double)(a >> 16);
+ double fracpart = (double)(a & ((1U << 16) - 1)) / (1U << 16);
+ return intpart + fracpart;
+}
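+
+/*
+ * E.g. the raw 16.16 value 0x00018000 has integer part 0x1 and fractional
+ * part 0x8000 / 65536 = 0.5, so fxp2double(0x00018000) == 1.5.
+ */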
+
+/* Is a close to b? */
+static bool
+double_close(double a, double b) {
+ /*
+ * Our implementation doesn't try for precision. Correspondingly, don't
+ * enforce it too strenuously here; accept values that are close in
+ * either relative or absolute terms.
+ */
+ return fabs(a - b) < 0.01 || fabs(a - b) / a < 0.01;
+}
+
+static bool
+fxp_close(fxp_t a, fxp_t b) {
+ return double_close(fxp2double(a), fxp2double(b));
+}
+
+static fxp_t
+xparse_fxp(const char *str) {
+ fxp_t result;
+ bool err = fxp_parse(&result, str, NULL);
+ assert_false(err, "Invalid fxp string: %s", str);
+ return result;
+}
+
+static void
+expect_parse_accurate(const char *str, const char *parse_str) {
+ double true_val = strtod(str, NULL);
+ fxp_t fxp_val;
+ char *end;
+ bool err = fxp_parse(&fxp_val, parse_str, &end);
+ expect_false(err, "Unexpected parse failure");
+ expect_ptr_eq(parse_str + strlen(str), end,
+ "Didn't parse whole string");
+ expect_true(double_close(fxp2double(fxp_val), true_val),
+ "Misparsed %s", str);
+}
+
+static void
+parse_valid_trial(const char *str) {
+ /* The value it parses should be correct. */
+ expect_parse_accurate(str, str);
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%swith_some_trailing_text", str);
+ expect_parse_accurate(str, buf);
+ snprintf(buf, sizeof(buf), "%s with a space", str);
+ expect_parse_accurate(str, buf);
+ snprintf(buf, sizeof(buf), "%s,in_a_malloc_conf_string:1", str);
+ expect_parse_accurate(str, buf);
+}
+
+TEST_BEGIN(test_parse_valid) {
+ parse_valid_trial("0");
+ parse_valid_trial("1");
+ parse_valid_trial("2");
+ parse_valid_trial("100");
+ parse_valid_trial("345");
+ parse_valid_trial("00000000123");
+ parse_valid_trial("00000000987");
+
+ parse_valid_trial("0.0");
+ parse_valid_trial("0.00000000000456456456");
+ parse_valid_trial("100.00000000000456456456");
+
+ parse_valid_trial("123.1");
+ parse_valid_trial("123.01");
+ parse_valid_trial("123.001");
+ parse_valid_trial("123.0001");
+ parse_valid_trial("123.00001");
+ parse_valid_trial("123.000001");
+ parse_valid_trial("123.0000001");
+
+ parse_valid_trial(".0");
+ parse_valid_trial(".1");
+ parse_valid_trial(".01");
+ parse_valid_trial(".001");
+ parse_valid_trial(".0001");
+ parse_valid_trial(".00001");
+ parse_valid_trial(".000001");
+
+ parse_valid_trial(".1");
+ parse_valid_trial(".10");
+ parse_valid_trial(".100");
+ parse_valid_trial(".1000");
+ parse_valid_trial(".100000");
+}
+TEST_END
+
+static void
+expect_parse_failure(const char *str) {
+ fxp_t result = FXP_INIT_INT(333);
+ char *end = (void *)0x123;
+ bool err = fxp_parse(&result, str, &end);
+ expect_true(err, "Expected a parse error on: %s", str);
+ expect_ptr_eq((void *)0x123, end,
+ "Parse error shouldn't change results");
+ expect_u32_eq(result, FXP_INIT_INT(333),
+ "Parse error shouldn't change results");
+}
+
+TEST_BEGIN(test_parse_invalid) {
+ expect_parse_failure("123.");
+ expect_parse_failure("3.a");
+ expect_parse_failure(".a");
+ expect_parse_failure("a.1");
+ expect_parse_failure("a");
+ /* A valid string, but one that overflows. */
+ expect_parse_failure("123456789");
+ expect_parse_failure("0000000123456789");
+ expect_parse_failure("1000000");
+}
+TEST_END
+
+static void
+expect_init_percent(unsigned percent, const char *str) {
+ fxp_t result_init = FXP_INIT_PERCENT(percent);
+ fxp_t result_parse = xparse_fxp(str);
+ expect_u32_eq(result_init, result_parse,
+ "Expect representations of FXP_INIT_PERCENT(%u) and "
+ "fxp_parse(\"%s\") to be equal; got %x and %x",
+ percent, str, result_init, result_parse);
+
+}
+
+/*
+ * Every other test uses either parsing or FXP_INIT_INT; it gets tested in those
+ * ways. We need a one-off for the percent-based initialization, though.
+ */
+TEST_BEGIN(test_init_percent) {
+ expect_init_percent(100, "1");
+ expect_init_percent(75, ".75");
+ expect_init_percent(1, ".01");
+ expect_init_percent(50, ".5");
+}
+TEST_END
+
+static void
+expect_add(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_add(a, b), result),
+ "Expected %s + %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_add_simple) {
+ expect_add("0", "0", "0");
+ expect_add("0", "1", "1");
+ expect_add("1", "1", "2");
+ expect_add("1.5", "1.5", "3");
+ expect_add("0.1", "0.1", "0.2");
+ expect_add("123", "456", "579");
+}
+TEST_END
+
+static void
+expect_sub(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_sub(a, b), result),
+ "Expected %s - %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_sub_simple) {
+ expect_sub("0", "0", "0");
+ expect_sub("1", "0", "1");
+ expect_sub("1", "1", "0");
+ expect_sub("3.5", "1.5", "2");
+ expect_sub("0.3", "0.1", "0.2");
+ expect_sub("456", "123", "333");
+}
+TEST_END
+
+static void
+expect_mul(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_mul(a, b), result),
+ "Expected %s * %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_mul_simple) {
+ expect_mul("0", "0", "0");
+ expect_mul("1", "0", "0");
+ expect_mul("1", "1", "1");
+ expect_mul("1.5", "1.5", "2.25");
+ expect_mul("100.0", "10", "1000");
+ expect_mul(".1", "10", "1");
+}
+TEST_END
+
+static void
+expect_div(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_div(a, b), result),
+ "Expected %s / %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_div_simple) {
+ expect_div("1", "1", "1");
+ expect_div("0", "1", "0");
+ expect_div("2", "1", "2");
+ expect_div("3", "2", "1.5");
+ expect_div("3", "1.5", "2");
+ expect_div("10", ".1", "100");
+ expect_div("123", "456", ".2697368421");
+}
+TEST_END
+
+static void
+expect_round(const char *str, uint32_t rounded_down, uint32_t rounded_nearest) {
+ fxp_t fxp = xparse_fxp(str);
+ uint32_t fxp_rounded_down = fxp_round_down(fxp);
+ uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp);
+ expect_u32_eq(rounded_down, fxp_rounded_down,
+ "Mistake rounding %s down", str);
+ expect_u32_eq(rounded_nearest, fxp_rounded_nearest,
+ "Mistake rounding %s to nearest", str);
+}
+
+TEST_BEGIN(test_round_simple) {
+ expect_round("1.5", 1, 2);
+ expect_round("0", 0, 0);
+ expect_round("0.1", 0, 0);
+ expect_round("0.4", 0, 0);
+ expect_round("0.40000", 0, 0);
+ expect_round("0.5", 0, 1);
+ expect_round("0.6", 0, 1);
+ expect_round("123", 123, 123);
+ expect_round("123.4", 123, 123);
+ expect_round("123.5", 123, 124);
+}
+TEST_END
+
+static void
+expect_mul_frac(size_t a, const char *fracstr, size_t expected) {
+ fxp_t frac = xparse_fxp(fracstr);
+ size_t result = fxp_mul_frac(a, frac);
+ expect_true(double_close(expected, result),
+ "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr,
+ expected, result);
+}
+
+TEST_BEGIN(test_mul_frac_simple) {
+ expect_mul_frac(SIZE_MAX, "1.0", SIZE_MAX);
+ expect_mul_frac(SIZE_MAX, ".75", SIZE_MAX / 4 * 3);
+ expect_mul_frac(SIZE_MAX, ".5", SIZE_MAX / 2);
+ expect_mul_frac(SIZE_MAX, ".25", SIZE_MAX / 4);
+ expect_mul_frac(1U << 16, "1.0", 1U << 16);
+ expect_mul_frac(1U << 30, "0.5", 1U << 29);
+ expect_mul_frac(1U << 30, "0.25", 1U << 28);
+ expect_mul_frac(1U << 30, "0.125", 1U << 27);
+ expect_mul_frac((1U << 30) + 1, "0.125", 1U << 27);
+ expect_mul_frac(100, "0.25", 25);
+ expect_mul_frac(1000 * 1000, "0.001", 1000);
+}
+TEST_END
+
+static void
+expect_print(const char *str) {
+ fxp_t fxp = xparse_fxp(str);
+ char buf[FXP_BUF_SIZE];
+ fxp_print(fxp, buf);
+ expect_d_eq(0, strcmp(str, buf), "Couldn't round-trip print %s", str);
+}
+
+TEST_BEGIN(test_print_simple) {
+ expect_print("0.0");
+ expect_print("1.0");
+ expect_print("2.0");
+ expect_print("123.0");
+ /*
+ * We hit the possibility of roundoff errors whenever the fractional
+ * component isn't a round binary number; only check these here (we
+ * round-trip properly in the stress test).
+ */
+ expect_print("1.5");
+ expect_print("3.375");
+ expect_print("0.25");
+ expect_print("0.125");
+ /* 1 / 2**14 */
+ expect_print("0.00006103515625");
+}
+TEST_END
+
+TEST_BEGIN(test_stress) {
+ const char *numbers[] = {
+ "0.0", "0.1", "0.2", "0.3", "0.4",
+ "0.5", "0.6", "0.7", "0.8", "0.9",
+
+ "1.0", "1.1", "1.2", "1.3", "1.4",
+ "1.5", "1.6", "1.7", "1.8", "1.9",
+
+ "2.0", "2.1", "2.2", "2.3", "2.4",
+ "2.5", "2.6", "2.7", "2.8", "2.9",
+
+ "17.0", "17.1", "17.2", "17.3", "17.4",
+ "17.5", "17.6", "17.7", "17.8", "17.9",
+
+ "18.0", "18.1", "18.2", "18.3", "18.4",
+ "18.5", "18.6", "18.7", "18.8", "18.9",
+
+ "123.0", "123.1", "123.2", "123.3", "123.4",
+ "123.5", "123.6", "123.7", "123.8", "123.9",
+
+ "124.0", "124.1", "124.2", "124.3", "124.4",
+ "124.5", "124.6", "124.7", "124.8", "124.9",
+
+ "125.0", "125.1", "125.2", "125.3", "125.4",
+ "125.5", "125.6", "125.7", "125.8", "125.9"};
+ size_t numbers_len = sizeof(numbers)/sizeof(numbers[0]);
+ for (size_t i = 0; i < numbers_len; i++) {
+ fxp_t fxp_a = xparse_fxp(numbers[i]);
+ double double_a = strtod(numbers[i], NULL);
+
+ uint32_t fxp_rounded_down = fxp_round_down(fxp_a);
+ uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp_a);
+ uint32_t double_rounded_down = (uint32_t)double_a;
+ uint32_t double_rounded_nearest = (uint32_t)round(double_a);
+
+ expect_u32_eq(double_rounded_down, fxp_rounded_down,
+ "Incorrectly rounded down %s", numbers[i]);
+ expect_u32_eq(double_rounded_nearest, fxp_rounded_nearest,
+ "Incorrectly rounded-to-nearest %s", numbers[i]);
+
+ for (size_t j = 0; j < numbers_len; j++) {
+ fxp_t fxp_b = xparse_fxp(numbers[j]);
+ double double_b = strtod(numbers[j], NULL);
+
+ fxp_t fxp_sum = fxp_add(fxp_a, fxp_b);
+ double double_sum = double_a + double_b;
+ expect_true(
+ double_close(fxp2double(fxp_sum), double_sum),
+ "Miscomputed %s + %s", numbers[i], numbers[j]);
+
+ if (double_a > double_b) {
+ fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b);
+ double double_diff = double_a - double_b;
+ expect_true(
+ double_close(fxp2double(fxp_diff),
+ double_diff),
+ "Miscomputed %s - %s", numbers[i],
+ numbers[j]);
+ }
+
+ fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b);
+ double double_prod = double_a * double_b;
+ expect_true(
+ double_close(fxp2double(fxp_prod), double_prod),
+ "Miscomputed %s * %s", numbers[i], numbers[j]);
+
+ if (double_b != 0.0) {
+ fxp_t fxp_quot = fxp_div(fxp_a, fxp_b);
+ double double_quot = double_a / double_b;
+ expect_true(
+ double_close(fxp2double(fxp_quot),
+ double_quot),
+ "Miscomputed %s / %s", numbers[i],
+ numbers[j]);
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_parse_valid,
+ test_parse_invalid,
+ test_init_percent,
+ test_add_simple,
+ test_sub_simple,
+ test_mul_simple,
+ test_div_simple,
+ test_round_simple,
+ test_mul_frac_simple,
+ test_print_simple,
+ test_stress);
+}
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
new file mode 100644
index 0000000..49f0823
--- /dev/null
+++ b/deps/jemalloc/test/unit/hash.c
@@ -0,0 +1,173 @@
+/*
+ * This file is based on code that is part of SMHasher
+ * (https://code.google.com/p/smhasher/), and is subject to the MIT license
+ * (http://www.opensource.org/licenses/mit-license.php). Both email addresses
+ * associated with the source code's revision history belong to Austin Appleby,
+ * and the revision history ranges from 2010 to 2012. Therefore the copyright
+ * and license are here taken to be:
+ *
+ * Copyright (c) 2010-2012 Austin Appleby
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "test/jemalloc_test.h"
+#include "jemalloc/internal/hash.h"
+
+typedef enum {
+ hash_variant_x86_32,
+ hash_variant_x86_128,
+ hash_variant_x64_128
+} hash_variant_t;
+
+static int
+hash_variant_bits(hash_variant_t variant) {
+ switch (variant) {
+ case hash_variant_x86_32: return 32;
+ case hash_variant_x86_128: return 128;
+ case hash_variant_x64_128: return 128;
+ default: not_reached();
+ }
+}
+
+static const char *
+hash_variant_string(hash_variant_t variant) {
+ switch (variant) {
+ case hash_variant_x86_32: return "hash_x86_32";
+ case hash_variant_x86_128: return "hash_x86_128";
+ case hash_variant_x64_128: return "hash_x64_128";
+ default: not_reached();
+ }
+}
+
+#define KEY_SIZE 256
+static void
+hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
+ const int hashbytes = hash_variant_bits(variant) / 8;
+ const int hashes_size = hashbytes * 256;
+ VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
+ VARIABLE_ARRAY(uint8_t, final, hashbytes);
+ unsigned i;
+ uint32_t computed, expected;
+
+ memset(key, 0, KEY_SIZE);
+ memset(hashes, 0, hashes_size);
+ memset(final, 0, hashbytes);
+
+ /*
+	 * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255}, using
+	 * 256-i as the seed for the key of length i.
+ */
+ for (i = 0; i < 256; i++) {
+ key[i] = (uint8_t)i;
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out;
+ out = hash_x86_32(key, i, 256-i);
+ memcpy(&hashes[i*hashbytes], &out, hashbytes);
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } default: not_reached();
+ }
+ }
+
+ /* Hash the result array. */
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out = hash_x86_32(hashes, hashes_size, 0);
+ memcpy(final, &out, sizeof(out));
+ break;
+ } case hash_variant_x86_128: {
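+/*
+ * The unset-bit analogue of scount_contiguous: with all set bits contiguous
+ * in [set_start, set_end), returns the number of unset bits in
+ * [ucount_start, ucount_end).
+ */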
+ uint64_t out[2];
+ hash_x86_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } default: not_reached();
+ }
+
+ computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
+ (final[3] << 24);
+
+ switch (variant) {
+#ifdef JEMALLOC_BIG_ENDIAN
+ case hash_variant_x86_32: expected = 0x6213303eU; break;
+ case hash_variant_x86_128: expected = 0x266820caU; break;
+ case hash_variant_x64_128: expected = 0xcc622b6fU; break;
+#else
+ case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
+ case hash_variant_x86_128: expected = 0xb3ece62aU; break;
+ case hash_variant_x64_128: expected = 0x6384ba69U; break;
+#endif
+ default: not_reached();
+ }
+
+ expect_u32_eq(computed, expected,
+ "Hash mismatch for %s(): expected %#x but got %#x",
+ hash_variant_string(variant), expected, computed);
+}
+
+static void
+hash_variant_verify(hash_variant_t variant) {
+#define MAX_ALIGN 16
+ uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
+ unsigned i;
+
+ for (i = 0; i < MAX_ALIGN; i++) {
+ hash_variant_verify_key(variant, &key[i]);
+ }
+#undef MAX_ALIGN
+}
+#undef KEY_SIZE
+
+TEST_BEGIN(test_hash_x86_32) {
+ hash_variant_verify(hash_variant_x86_32);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x86_128) {
+ hash_variant_verify(hash_variant_x86_128);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x64_128) {
+ hash_variant_verify(hash_variant_x64_128);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_hash_x86_32,
+ test_hash_x86_128,
+ test_hash_x64_128);
+}
diff --git a/deps/jemalloc/test/unit/hook.c b/deps/jemalloc/test/unit/hook.c
new file mode 100644
index 0000000..16a6f1b
--- /dev/null
+++ b/deps/jemalloc/test/unit/hook.c
@@ -0,0 +1,586 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/hook.h"
+
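+/*
+ * The test hooks stash their most recent arguments in these globals so that
+ * the tests can inspect them after triggering an allocator call.
+ */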
+static void *arg_extra;
+static int arg_type;
+static void *arg_result;
+static void *arg_address;
+static size_t arg_old_usize;
+static size_t arg_new_usize;
+static uintptr_t arg_result_raw;
+static uintptr_t arg_args_raw[4];
+
+static int call_count = 0;
+
+static void
+reset_args() {
+ arg_extra = NULL;
+ arg_type = 12345;
+ arg_result = NULL;
+ arg_address = NULL;
+ arg_old_usize = 0;
+ arg_new_usize = 0;
+ arg_result_raw = 0;
+ memset(arg_args_raw, 77, sizeof(arg_args_raw));
+}
+
+static void
+alloc_free_size(size_t sz) {
+	void *ptr = mallocx(sz, 0);
+	free(ptr);
+	ptr = mallocx(sz, 0);
+	free(ptr);
+	ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+}
+
+/*
+ * We want to support a degree of user reentrancy. This tests a variety of
+ * allocation scenarios.
+ */
+static void
+be_reentrant() {
+ /* Let's make sure the tcache is non-empty if enabled. */
+ alloc_free_size(1);
+ alloc_free_size(1024);
+ alloc_free_size(64 * 1024);
+ alloc_free_size(256 * 1024);
+ alloc_free_size(1024 * 1024);
+
+ /* Some reallocation. */
+ void *ptr = mallocx(129, 0);
+ ptr = rallocx(ptr, 130, 0);
+ free(ptr);
+
+ ptr = mallocx(2 * 1024 * 1024, 0);
+ free(ptr);
+ ptr = mallocx(1 * 1024 * 1024, 0);
+ ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
+ free(ptr);
+
+ ptr = mallocx(1, 0);
+ ptr = rallocx(ptr, 1000, 0);
+ free(ptr);
+}
+
+static void
+set_args_raw(uintptr_t *args_raw, int nargs) {
+ memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
+}
+
+static void
+expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
+ int cmp = memcmp(args_raw_expected, arg_args_raw,
+ sizeof(uintptr_t) * nargs);
+ expect_d_eq(cmp, 0, "Raw args mismatch");
+}
+
+static void
+reset() {
+ call_count = 0;
+ reset_args();
+}
+
+static void
+test_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ uintptr_t result_raw, uintptr_t args_raw[3]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_result = result;
+ arg_result_raw = result_raw;
+ set_args_raw(args_raw, 3);
+ be_reentrant();
+}
+
+static void
+test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_address = address;
+ set_args_raw(args_raw, 3);
+ be_reentrant();
+}
+
+static void
+test_expand_hook(void *extra, hook_expand_t type, void *address,
+ size_t old_usize, size_t new_usize, uintptr_t result_raw,
+ uintptr_t args_raw[4]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_address = address;
+ arg_old_usize = old_usize;
+ arg_new_usize = new_usize;
+ arg_result_raw = result_raw;
+ set_args_raw(args_raw, 4);
+ be_reentrant();
+}
+
+TEST_BEGIN(test_hooks_basic) {
+	/* Just verify that the hooks record their arguments correctly. */
+ hooks_t hooks = {
+ &test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
+ (void *)111};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+
+ /* Alloc */
+ reset_args();
+ hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
+ args_raw);
+ expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
+ expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
+ "Passed wrong alloc type");
+ expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
+ expect_u64_eq(333, arg_result_raw, "Passed wrong result");
+ expect_args_raw(args_raw, 3);
+
+ /* Dalloc */
+ reset_args();
+ hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
+ expect_d_eq((int)hook_dalloc_sdallocx, arg_type,
+ "Passed wrong dalloc type");
+ expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ expect_args_raw(args_raw, 3);
+
+ /* Expand */
+ reset_args();
+ hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
+ args_raw);
+ expect_d_eq((int)hook_expand_xallocx, arg_type,
+ "Passed wrong expand type");
+ expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
+ expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
+ expect_zu_eq(555, arg_result_raw, "Passed wrong result");
+ expect_args_raw(args_raw, 4);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_null) {
+ /* Null hooks should be ignored, not crash. */
+ hooks_t hooks1 = {NULL, NULL, NULL, NULL};
+ hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
+ hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
+ hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};
+
+ void *handle1 = hook_install(TSDN_NULL, &hooks1);
+ void *handle2 = hook_install(TSDN_NULL, &hooks2);
+ void *handle3 = hook_install(TSDN_NULL, &hooks3);
+ void *handle4 = hook_install(TSDN_NULL, &hooks4);
+
+ expect_ptr_ne(handle1, NULL, "Hook installation failed");
+ expect_ptr_ne(handle2, NULL, "Hook installation failed");
+ expect_ptr_ne(handle3, NULL, "Hook installation failed");
+ expect_ptr_ne(handle4, NULL, "Hook installation failed");
+
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+
+ call_count = 0;
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
+ expect_d_eq(call_count, 1, "Called wrong number of times");
+
+ call_count = 0;
+ hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
+ expect_d_eq(call_count, 1, "Called wrong number of times");
+
+ call_count = 0;
+ hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
+ expect_d_eq(call_count, 1, "Called wrong number of times");
+
+ hook_remove(TSDN_NULL, handle1);
+ hook_remove(TSDN_NULL, handle2);
+ hook_remove(TSDN_NULL, handle3);
+ hook_remove(TSDN_NULL, handle4);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_remove) {
+ hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+ call_count = 0;
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
+ expect_d_eq(call_count, 1, "Hook not invoked");
+
+ call_count = 0;
+ hook_remove(TSDN_NULL, handle);
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
+ expect_d_eq(call_count, 0, "Hook invoked after removal");
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_alloc_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+
+ /* Stop malloc from being optimized away. */
+ volatile int err;
+ void *volatile ptr;
+
+ /* malloc */
+ reset();
+ ptr = malloc(1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ free(ptr);
+
+ /* posix_memalign */
+ reset();
+ err = posix_memalign((void **)&ptr, 1024, 1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_posix_memalign,
+ "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
+ free(ptr);
+
+ /* aligned_alloc */
+ reset();
+ ptr = aligned_alloc(1024, 1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
+ "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* calloc */
+ reset();
+ ptr = calloc(11, 13);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* memalign */
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+ reset();
+ ptr = memalign(1024, 1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
+
+ /* valloc */
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+ reset();
+ ptr = valloc(1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ free(ptr);
+#endif /* JEMALLOC_OVERRIDE_VALLOC */
+
+ /* mallocx */
+ reset();
+ ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
+ "Wrong flags");
+ free(ptr);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_dalloc_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* free() */
+ reset();
+ ptr = malloc(1);
+ free(ptr);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+
+ /* dallocx() */
+ reset();
+ ptr = malloc(1);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
+ "Wrong raw arg");
+
+ /* sdallocx() */
+ reset();
+ ptr = malloc(1);
+ sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
+ "Wrong raw arg");
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_expand_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* xallocx() */
+ reset();
+ ptr = malloc(1);
+ size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
+ expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
+ expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
+ expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
+ expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
+ expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
+ expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
+ hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
+ &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* realloc(NULL, size) as malloc */
+ reset();
+ ptr = realloc(NULL, 1);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* realloc(ptr, 0) as free */
+ if (opt_zero_realloc_action == zero_realloc_action_free) {
+ ptr = malloc(1);
+ reset();
+ realloc(ptr, 0);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_realloc,
+ "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address,
+ "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0],
+ "Wrong raw arg");
+ expect_u64_eq((uintptr_t)0, arg_args_raw[1],
+ "Wrong raw arg");
+ }
+
+ /* realloc(NULL, 0) as malloc(0) */
+ reset();
+ ptr = realloc(NULL, 0);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+static void
+do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
+ int expand_type, int dalloc_type) {
+ hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
+ &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+ void *volatile ptr2;
+
+ /* Realloc in-place, small. */
+ ptr = malloc(129);
+ reset();
+ ptr2 = ralloc(ptr, 130, flags);
+ expect_ptr_eq(ptr, ptr2, "Small realloc moved");
+
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, expand_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /*
+ * Realloc in-place, large. Since we can't guarantee the large case
+ * across all platforms, we stay resilient to moving results.
+ */
+ ptr = malloc(2 * 1024 * 1024);
+ free(ptr);
+ ptr2 = malloc(1 * 1024 * 1024);
+ reset();
+ ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
+ /* ptr is the new address, ptr2 is the old address. */
+ if (ptr == ptr2) {
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_d_eq(arg_type, expand_type, "Wrong hook type");
+ } else {
+ expect_d_eq(call_count, 2, "Wrong hooks called");
+ expect_ptr_eq(ptr, arg_result, "Wrong address");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ }
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_ptr_eq(ptr2, arg_address, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ "Wrong argument");
+ free(ptr);
+
+ /* Realloc with move, small. */
+ ptr = malloc(8);
+ reset();
+ ptr2 = ralloc(ptr, 128, flags);
+ expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");
+
+ expect_d_eq(call_count, 2, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_ptr_eq(ptr2, arg_result, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
+ free(ptr2);
+
+ /* Realloc with move, large. */
+ ptr = malloc(1);
+ reset();
+ ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
+ expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");
+
+ expect_d_eq(call_count, 2, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_ptr_eq(ptr2, arg_result, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ "Wrong argument");
+ free(ptr2);
+
+ hook_remove(TSDN_NULL, handle);
+}
+
+static void *
+realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
+ return realloc(ptr, size);
+}
+
+TEST_BEGIN(test_hooks_realloc) {
+ do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc,
+ hook_dalloc_realloc);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_rallocx) {
+ do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
+ hook_dalloc_rallocx);
+}
+TEST_END
+
+int
+main(void) {
+ /* We assert on call counts. */
+ return test_no_reentrancy(
+ test_hooks_basic,
+ test_hooks_null,
+ test_hooks_remove,
+ test_hooks_alloc_simple,
+ test_hooks_dalloc_simple,
+ test_hooks_expand_simple,
+ test_hooks_realloc_as_malloc_or_free,
+ test_hooks_realloc,
+ test_hooks_rallocx);
+}
diff --git a/deps/jemalloc/test/unit/hpa.c b/deps/jemalloc/test/unit/hpa.c
new file mode 100644
index 0000000..dfd57f3
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa.c
@@ -0,0 +1,459 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/hpa.h"
+#include "jemalloc/internal/nstime.h"
+
+#define SHARD_IND 111
+
+#define ALLOC_MAX (HUGEPAGE / 4)
+
+typedef struct test_data_s test_data_t;
+struct test_data_s {
+ /*
+ * Must be the first member -- we convert back and forth between the
+	 * test_data_t and the hpa_shard_t.
+ */
+ hpa_shard_t shard;
+ hpa_central_t central;
+ base_t *base;
+ edata_cache_t shard_edata_cache;
+
+ emap_t emap;
+};
+
+static hpa_shard_opts_t test_hpa_shard_opts_default = {
+ /* slab_max_alloc */
+ ALLOC_MAX,
+ /* hugification threshold */
+ HUGEPAGE,
+ /* dirty_mult */
+ FXP_INIT_PERCENT(25),
+ /* deferral_allowed */
+ false,
+ /* hugify_delay_ms */
+ 10 * 1000,
+};
+
+static hpa_shard_t *
+create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
+ bool err;
+ base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+
+ test_data_t *test_data = malloc(sizeof(test_data_t));
+ assert_ptr_not_null(test_data, "");
+
+ test_data->base = base;
+
+ err = edata_cache_init(&test_data->shard_edata_cache, base);
+ assert_false(err, "");
+
+ err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
+ assert_false(err, "");
+
+ err = hpa_central_init(&test_data->central, test_data->base, hooks);
+ assert_false(err, "");
+
+ err = hpa_shard_init(&test_data->shard, &test_data->central,
+ &test_data->emap, test_data->base, &test_data->shard_edata_cache,
+ SHARD_IND, opts);
+ assert_false(err, "");
+
+ return (hpa_shard_t *)test_data;
+}
+
+static void
+destroy_test_data(hpa_shard_t *shard) {
+ test_data_t *test_data = (test_data_t *)shard;
+ base_delete(TSDN_NULL, test_data->base);
+ free(test_data);
+}
+
+TEST_BEGIN(test_alloc_max) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ edata_t *edata;
+
+ /* Small max */
+ bool deferred_work_generated = false;
+ edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
+ false, &deferred_work_generated);
+ expect_ptr_not_null(edata, "Allocation of small max failed");
+ edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_null(edata, "Allocation of larger than small max succeeded");
+
+ destroy_test_data(shard);
+}
+TEST_END
+
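+/*
+ * The stress test stores a mem_contents_t node inside each live allocation
+ * and keeps the nodes in a red-black tree ordered by address, so that
+ * neighboring allocations can be checked for overlap.
+ */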
+typedef struct mem_contents_s mem_contents_t;
+struct mem_contents_s {
+ uintptr_t my_addr;
+ size_t size;
+ edata_t *my_edata;
+ rb_node(mem_contents_t) link;
+};
+
+static int
+mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
+ return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
+}
+
+typedef rb_tree(mem_contents_t) mem_tree_t;
+rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
+ mem_contents_cmp);
+
+static void
+node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
+ assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
+ assert_zu_le(a->my_addr + a->size, b->my_addr, "");
+}
+
+static void
+node_check(mem_tree_t *tree, mem_contents_t *contents) {
+ edata_t *edata = contents->my_edata;
+ assert_ptr_eq(contents, (void *)contents->my_addr, "");
+ assert_ptr_eq(contents, edata_base_get(edata), "");
+ assert_zu_eq(contents->size, edata_size_get(edata), "");
+ assert_ptr_eq(contents->my_edata, edata, "");
+
+ mem_contents_t *next = mem_tree_next(tree, contents);
+ if (next != NULL) {
+ node_assert_ordered(contents, next);
+ }
+ mem_contents_t *prev = mem_tree_prev(tree, contents);
+ if (prev != NULL) {
+ node_assert_ordered(prev, contents);
+ }
+}
+
+static void
+node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
+ mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
+ contents->my_addr = (uintptr_t)edata_base_get(edata);
+ contents->size = edata_size_get(edata);
+ contents->my_edata = edata;
+ mem_tree_insert(tree, contents);
+ node_check(tree, contents);
+}
+
+static void
+node_remove(mem_tree_t *tree, edata_t *edata) {
+ mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
+ node_check(tree, contents);
+ mem_tree_remove(tree, contents);
+}
+
+TEST_BEGIN(test_stress) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ const size_t nlive_edatas_max = 500;
+ size_t nlive_edatas = 0;
+ edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
+ /*
+ * Nothing special about this constant; we're only fixing it for
+ * consistency across runs.
+ */
+ size_t prng_state = (size_t)0x76999ffb014df07c;
+
+ mem_tree_t tree;
+ mem_tree_new(&tree);
+
+ bool deferred_work_generated = false;
+
+ for (size_t i = 0; i < 100 * 1000; i++) {
+ size_t operation = prng_range_zu(&prng_state, 2);
+ if (operation == 0) {
+ /* Alloc */
+ if (nlive_edatas == nlive_edatas_max) {
+ continue;
+ }
+
+ /*
+ * We make sure to get an even balance of small and
+ * large allocations.
+ */
+ size_t npages_min = 1;
+ size_t npages_max = ALLOC_MAX / PAGE;
+ size_t npages = npages_min + prng_range_zu(&prng_state,
+ npages_max - npages_min);
+ edata_t *edata = pai_alloc(tsdn, &shard->pai,
+ npages * PAGE, PAGE, false, false, false,
+ &deferred_work_generated);
+ assert_ptr_not_null(edata,
+ "Unexpected allocation failure");
+ live_edatas[nlive_edatas] = edata;
+ nlive_edatas++;
+ node_insert(&tree, edata, npages);
+ } else {
+ /* Free. */
+ if (nlive_edatas == 0) {
+ continue;
+ }
+ size_t victim = prng_range_zu(&prng_state, nlive_edatas);
+ edata_t *to_free = live_edatas[victim];
+ live_edatas[victim] = live_edatas[nlive_edatas - 1];
+ nlive_edatas--;
+ node_remove(&tree, to_free);
+ pai_dalloc(tsdn, &shard->pai, to_free,
+ &deferred_work_generated);
+ }
+ }
+
+ size_t ntreenodes = 0;
+ for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
+ contents = mem_tree_next(&tree, contents)) {
+ ntreenodes++;
+ node_check(&tree, contents);
+ }
+ expect_zu_eq(ntreenodes, nlive_edatas, "");
+
+ /*
+ * Test hpa_shard_destroy, which requires as a precondition that all its
+ * extents have been deallocated.
+ */
+ for (size_t i = 0; i < nlive_edatas; i++) {
+ edata_t *to_free = live_edatas[i];
+ node_remove(&tree, to_free);
+ pai_dalloc(tsdn, &shard->pai, to_free,
+ &deferred_work_generated);
+ }
+ hpa_shard_destroy(tsdn, shard);
+
+ free(live_edatas);
+ destroy_test_data(shard);
+}
+TEST_END
+
+static void
+expect_contiguous(edata_t **edatas, size_t nedatas) {
+ for (size_t i = 0; i < nedatas; i++) {
+ size_t expected = (size_t)edata_base_get(edatas[0])
+ + i * PAGE;
+ expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
+ "Mismatch at index %zu", i);
+ }
+}
+
+TEST_BEGIN(test_alloc_dalloc_batch) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ bool deferred_work_generated = false;
+
+ enum {NALLOCS = 8};
+
+ edata_t *allocs[NALLOCS];
+ /*
+ * Allocate a mix of ways; first half from regular alloc, second half
+ * from alloc_batch.
+ */
+ for (size_t i = 0; i < NALLOCS / 2; i++) {
+ allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+ edata_list_active_t allocs_list;
+ edata_list_active_init(&allocs_list);
+ size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
+ &allocs_list, &deferred_work_generated);
+ expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
+ for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
+ allocs[i] = edata_list_active_first(&allocs_list);
+ edata_list_active_remove(&allocs_list, allocs[i]);
+ }
+
+ /*
+ * Should have allocated them contiguously, despite the differing
+ * methods used.
+ */
+ void *orig_base = edata_base_get(allocs[0]);
+ expect_contiguous(allocs, NALLOCS);
+
+ /*
+ * Batch dalloc the first half, individually deallocate the second half.
+ */
+ for (size_t i = 0; i < NALLOCS / 2; i++) {
+ edata_list_active_append(&allocs_list, allocs[i]);
+ }
+ pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
+ &deferred_work_generated);
+ for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
+ pai_dalloc(tsdn, &shard->pai, allocs[i],
+ &deferred_work_generated);
+ }
+
+ /* Reallocate (individually), and ensure reuse and contiguity. */
+ for (size_t i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
+ }
+ void *new_base = edata_base_get(allocs[0]);
+ expect_ptr_eq(orig_base, new_base,
+ "Failed to reuse the allocated memory.");
+ expect_contiguous(allocs, NALLOCS);
+
+ destroy_test_data(shard);
+}
+TEST_END
+
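+/*
+ * Fake hpa hooks for test_defer_time: map is a bump-pointer allocator, unmap
+ * is a no-op, the purge/hugify/dehugify hooks only record that they were
+ * called, and time is simulated through defer_curtime.
+ */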
+static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
+static void *
+defer_test_map(size_t size) {
+ void *result = (void *)defer_bump_ptr;
+ defer_bump_ptr += size;
+ return result;
+}
+
+static void
+defer_test_unmap(void *ptr, size_t size) {
+ (void)ptr;
+ (void)size;
+}
+
+static bool defer_purge_called = false;
+static void
+defer_test_purge(void *ptr, size_t size) {
+ (void)ptr;
+ (void)size;
+ defer_purge_called = true;
+}
+
+static bool defer_hugify_called = false;
+static void
+defer_test_hugify(void *ptr, size_t size) {
+ defer_hugify_called = true;
+}
+
+static bool defer_dehugify_called = false;
+static void
+defer_test_dehugify(void *ptr, size_t size) {
+ defer_dehugify_called = true;
+}
+
+static nstime_t defer_curtime;
+static void
+defer_test_curtime(nstime_t *r_time, bool first_reading) {
+ *r_time = defer_curtime;
+}
+
+static uint64_t
+defer_test_ms_since(nstime_t *past_time) {
+ return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
+}
+
+TEST_BEGIN(test_defer_time) {
+ test_skip_if(!hpa_supported());
+
+ hpa_hooks_t hooks;
+ hooks.map = &defer_test_map;
+ hooks.unmap = &defer_test_unmap;
+ hooks.purge = &defer_test_purge;
+ hooks.hugify = &defer_test_hugify;
+ hooks.dehugify = &defer_test_dehugify;
+ hooks.curtime = &defer_test_curtime;
+ hooks.ms_since = &defer_test_ms_since;
+
+ hpa_shard_opts_t opts = test_hpa_shard_opts_default;
+ opts.deferral_allowed = true;
+
+ hpa_shard_t *shard = create_test_data(&hooks, &opts);
+
+ bool deferred_work_generated = false;
+
+ nstime_init(&defer_curtime, 0);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ edata_t *edatas[HUGEPAGE_PAGES];
+ for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
+ edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_not_null(edatas[i], "Unexpected null edata");
+ }
+ hpa_shard_do_deferred_work(tsdn, shard);
+ expect_false(defer_hugify_called, "Hugified too early");
+
+ /* Hugification delay is set to 10 seconds in options. */
+ nstime_init2(&defer_curtime, 11, 0);
+ hpa_shard_do_deferred_work(tsdn, shard);
+ expect_true(defer_hugify_called, "Failed to hugify");
+
+ defer_hugify_called = false;
+
+ /* Purge. Recall that dirty_mult is .25. */
+ for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &shard->pai, edatas[i],
+ &deferred_work_generated);
+ }
+
+ hpa_shard_do_deferred_work(tsdn, shard);
+
+ expect_false(defer_hugify_called, "Hugified too early");
+ expect_true(defer_dehugify_called, "Should have dehugified");
+ expect_true(defer_purge_called, "Should have purged");
+ defer_hugify_called = false;
+ defer_dehugify_called = false;
+ defer_purge_called = false;
+
+ /*
+ * Refill the page. We now meet the hugification threshold; we should
+ * be marked for pending hugify.
+ */
+ for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
+ edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_not_null(edatas[i], "Unexpected null edata");
+ }
+ /*
+ * We would be ineligible for hugification, had we not already met the
+ * threshold before dipping below it.
+ */
+ pai_dalloc(tsdn, &shard->pai, edatas[0],
+ &deferred_work_generated);
+ /* Wait for the threshold again. */
+ nstime_init2(&defer_curtime, 22, 0);
+ hpa_shard_do_deferred_work(tsdn, shard);
+	expect_true(defer_hugify_called, "Should have hugified");
+ expect_false(defer_dehugify_called, "Unexpected dehugify");
+ expect_false(defer_purge_called, "Unexpected purge");
+
+ destroy_test_data(shard);
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * These trigger unused-function warnings on CI runs, even if declared
+ * with static inline.
+ */
+ (void)mem_tree_empty;
+ (void)mem_tree_last;
+ (void)mem_tree_search;
+ (void)mem_tree_nsearch;
+ (void)mem_tree_psearch;
+ (void)mem_tree_iter;
+ (void)mem_tree_reverse_iter;
+ (void)mem_tree_destroy;
+ return test_no_reentrancy(
+ test_alloc_max,
+ test_stress,
+ test_alloc_dalloc_batch,
+ test_defer_time);
+}
diff --git a/deps/jemalloc/test/unit/hpa_background_thread.c b/deps/jemalloc/test/unit/hpa_background_thread.c
new file mode 100644
index 0000000..81c2561
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa_background_thread.c
@@ -0,0 +1,188 @@
+#include "test/jemalloc_test.h"
+#include "test/sleep.h"
+
+static void
+sleep_for_background_thread_interval() {
+ /*
+	 * The sleep interval set in our .sh file is 50ms, so the background
+	 * thread will likely have run if we sleep for four times that.
+ */
+ sleep_ns(200 * 1000 * 1000);
+}
+
+static unsigned
+create_arena() {
+ unsigned arena_ind;
+ size_t sz;
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 2),
+ 0, "Unexpected mallctl() failure");
+ return arena_ind;
+}
+
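+/*
+ * Return the number of dirty pages (huge + nonhuge) in the arena's empty HPA
+ * slabs, refreshing the stats epoch first.
+ */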
+static size_t
+get_empty_ndirty(unsigned arena_ind) {
+ int err;
+ size_t ndirty_huge;
+ size_t ndirty_nonhuge;
+ uint64_t epoch = 1;
+ size_t sz = sizeof(epoch);
+ err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
+ sizeof(epoch));
+ expect_d_eq(0, err, "Unexpected mallctl() failure");
+
+ size_t mib[6];
+ size_t miblen = sizeof(mib)/sizeof(mib[0]);
+ err = mallctlnametomib(
+ "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
+ &miblen);
+ expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
+
+ sz = sizeof(ndirty_nonhuge);
+ mib[2] = arena_ind;
+ err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
+ expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
+
+ err = mallctlnametomib(
+ "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
+ &miblen);
+ expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
+
+ sz = sizeof(ndirty_huge);
+ mib[2] = arena_ind;
+ err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
+ expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
+
+ return ndirty_huge + ndirty_nonhuge;
+}
+
+static void
+set_background_thread_enabled(bool enabled) {
+ int err;
+ err = je_mallctl("background_thread", NULL, NULL, &enabled,
+ sizeof(enabled));
+ expect_d_eq(0, err, "Unexpected mallctl failure");
+}
+
+static void
+wait_until_thread_is_enabled(unsigned arena_id) {
+ tsd_t* tsd = tsd_fetch();
+
+ bool sleeping = false;
+ int iterations = 0;
+ do {
+ background_thread_info_t *info =
+ background_thread_info_get(arena_id);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+		sleeping = background_thread_indefinite_sleep(info);
+		iterations++;
+		assert_d_lt(iterations, UINT64_C(1000000),
+		    "Waiting for a thread to start for too long");
+ } while (!sleeping);
+}
+
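+/*
+ * Allocate and free a single page, checking whether dirty pages show up in
+ * the stats; when deferral is expected, additionally wait for the background
+ * thread to purge them.
+ */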
+static void
+expect_purging(unsigned arena_ind, bool expect_deferred) {
+ size_t empty_ndirty;
+
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");
+
+ /*
+ * It's possible that we get unlucky with our stats collection timing,
+ * and the background thread runs in between the deallocation and the
+ * stats collection. So we retry 10 times, and see if we *ever* see
+ * deferred reclamation.
+ */
+ bool observed_dirty_page = false;
+ for (int i = 0; i < 10; i++) {
+ void *ptr = mallocx(PAGE,
+ MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ expect_zu_eq(0, empty_ndirty, "All pages should be active");
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ if (expect_deferred) {
+ expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
+ opt_prof, "Unexpected extra dirty page count: %zu",
+ empty_ndirty);
+ } else {
+ assert_zu_eq(0, empty_ndirty,
+ "Saw dirty pages without deferred purging");
+ }
+ if (empty_ndirty > 0) {
+ observed_dirty_page = true;
+ break;
+ }
+ }
+ expect_b_eq(expect_deferred, observed_dirty_page, "");
+
+ /*
+ * Under high concurrency / heavy test load (e.g. using run_test.sh),
+ * the background thread may not get scheduled for a longer period of
+ * time. Retry 100 times max before bailing out.
+ */
+ unsigned retry = 0;
+ while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
+ expect_deferred && (retry++ < 100)) {
+ sleep_for_background_thread_interval();
+ }
+
+ expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
+}
+
+TEST_BEGIN(test_hpa_background_thread_purges) {
+ test_skip_if(!config_stats);
+ test_skip_if(!hpa_supported());
+ test_skip_if(!have_background_thread);
+ /* Skip since guarded pages cannot be allocated from hpa. */
+ test_skip_if(san_guard_enabled());
+
+ unsigned arena_ind = create_arena();
+ /*
+ * Our .sh sets dirty mult to 0, so all dirty pages should get purged
+ * any time any thread frees.
+ */
+ expect_purging(arena_ind, /* expect_deferred */ true);
+}
+TEST_END
+
+TEST_BEGIN(test_hpa_background_thread_enable_disable) {
+ test_skip_if(!config_stats);
+ test_skip_if(!hpa_supported());
+ test_skip_if(!have_background_thread);
+ /* Skip since guarded pages cannot be allocated from hpa. */
+ test_skip_if(san_guard_enabled());
+
+ unsigned arena_ind = create_arena();
+
+ set_background_thread_enabled(false);
+ expect_purging(arena_ind, false);
+
+ set_background_thread_enabled(true);
+ wait_until_thread_is_enabled(arena_ind);
+ expect_purging(arena_ind, true);
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * OK, this is a sort of nasty hack. We don't want to add *another*
+ * config option for HPA (the intent is that it becomes available on
+ * more platforms over time, and we're trying to prune back config
+	 * options generally). But we'll get initialization errors on other
+ * platforms if we set hpa:true in the MALLOC_CONF (even if we set
+ * abort_conf:false as well). So we reach into the internals and set
+ * them directly, but only if we know that we're actually going to do
+ * something nontrivial in the tests.
+ */
+ if (config_stats && hpa_supported() && have_background_thread) {
+ opt_hpa = true;
+ opt_background_thread = true;
+ }
+ return test_no_reentrancy(
+ test_hpa_background_thread_purges,
+ test_hpa_background_thread_enable_disable);
+}
diff --git a/deps/jemalloc/test/unit/hpa_background_thread.sh b/deps/jemalloc/test/unit/hpa_background_thread.sh
new file mode 100644
index 0000000..65a56a0
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa_background_thread.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
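+# dirty_mult 0 makes every dirty page eligible for purging, the 50ms purge
+# interval keeps deferred work frequent, and disabling the SEC (nshards 0)
+# lets frees reach the HPA shard directly.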
+export MALLOC_CONF="hpa_dirty_mult:0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0"
+
diff --git a/deps/jemalloc/test/unit/hpdata.c b/deps/jemalloc/test/unit/hpdata.c
new file mode 100644
index 0000000..288e71d
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpdata.c
@@ -0,0 +1,244 @@
+#include "test/jemalloc_test.h"
+
+#define HPDATA_ADDR ((void *)(10 * HUGEPAGE))
+#define HPDATA_AGE 123
+
+TEST_BEGIN(test_reserve_alloc) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocating a page at a time, we should do first fit. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(HUGEPAGE_PAGES - i,
+ hpdata_longest_free_range_get(&hpdata), "");
+ void *alloc = hpdata_reserve_alloc(&hpdata, PAGE);
+ expect_ptr_eq((char *)HPDATA_ADDR + i * PAGE, alloc, "");
+ expect_true(hpdata_consistent(&hpdata), "");
+ }
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(0, hpdata_longest_free_range_get(&hpdata), "");
+
+ /*
+ * Build up a bigger free-range, 2 pages at a time, until we've got 6
+ * adjacent free pages total. Pages 8-13 should be unreserved after
+ * this.
+ */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 10 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(2, hpdata_longest_free_range_get(&hpdata), "");
+
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 12 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(4, hpdata_longest_free_range_get(&hpdata), "");
+
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 8 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
+
+ /*
+	 * Leave page 14 reserved, but free page 15 (this tests the case where
+ * unreserving combines two ranges).
+ */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 15 * PAGE, PAGE);
+ /*
+ * Longest free range shouldn't change; we've got a free range of size
+ * 6, then a reserved page, then another free range.
+ */
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
+
+ /* After freeing page 14, the two ranges get combined. */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 14 * PAGE, PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(8, hpdata_longest_free_range_get(&hpdata), "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_simple) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE_PAGES / 2 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Create HUGEPAGE_PAGES / 4 dirty inactive pages at the beginning. */
+ hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
+
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge, "");
+
+ void *purge_addr;
+ size_t purge_size;
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+
+ hpdata_purge_end(&hpdata, &purge_state);
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
+}
+TEST_END
+
+/*
+ * We only test intervening dallocs, not intervening allocs; the latter are
+ * disallowed as a purging precondition (because they interfere with purging
+ * across a retained extent, saving a purge call).
+ */
+TEST_BEGIN(test_purge_intervening_dalloc) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocate the first 3/4 of the pages. */
+ void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Free the first 1/4 and the third 1/4 of the pages. */
+ hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
+
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge, "");
+
+ void *purge_addr;
+ size_t purge_size;
+ /* First purge. */
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ /* Deallocate the second 1/4 before the second purge occurs. */
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 1 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ /* Now continue purging. */
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ purge_addr, "");
+ expect_zu_ge(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_over_retained) {
+ void *purge_addr;
+ size_t purge_size;
+
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocate the first 3/4 of the pages. */
+ void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Free the second quarter. */
+ void *second_quarter =
+ (void *)((uintptr_t)alloc + HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata, second_quarter, HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
+
+ /* Purge the second quarter. */
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge_dirty, "");
+
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(second_quarter, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
+
+ /* Free the first and third quarter. */
+ hpdata_unreserve(&hpdata, HPDATA_ADDR, HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ /*
+ * Purge again. The second quarter is retained, so we can safely
+ * re-purge it. We expect a single purge of 3/4 of the hugepage,
+ * purging half its pages.
+ */
+ to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge_dirty, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(3 * HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 0, "");
+}
+TEST_END
+
+TEST_BEGIN(test_hugify) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE / 2);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ expect_zu_eq(HUGEPAGE_PAGES / 2, hpdata_ntouched_get(&hpdata), "");
+
+ hpdata_hugify(&hpdata);
+
+	/* Hugifying should have increased the dirty page count. */
+ expect_zu_eq(HUGEPAGE_PAGES, hpdata_ntouched_get(&hpdata), "");
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_reserve_alloc,
+ test_purge_simple,
+ test_purge_intervening_dalloc,
+ test_purge_over_retained,
+ test_hugify);
+}
diff --git a/deps/jemalloc/test/unit/huge.c b/deps/jemalloc/test/unit/huge.c
new file mode 100644
index 0000000..ec64e50
--- /dev/null
+++ b/deps/jemalloc/test/unit/huge.c
@@ -0,0 +1,108 @@
+#include "test/jemalloc_test.h"
+
+/* Threshold: 2 << 20 = 2097152. */
+const char *malloc_conf = "oversize_threshold:2097152";
+
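+/*
+ * HUGE_SZ meets the oversize threshold above and should be served by the
+ * dedicated huge arena; SMALL_SZ is an ordinary small size.
+ */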
+#define HUGE_SZ (2 << 20)
+#define SMALL_SZ (8)
+
+TEST_BEGIN(huge_bind_thread) {
+ unsigned arena1, arena2;
+ size_t sz = sizeof(unsigned);
+
+ /* Bind to a manual arena. */
+ expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ "Failed to create arena");
+ expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
+ sizeof(arena1)), 0, "Fail to bind thread");
+
+ void *ptr = mallocx(HUGE_SZ, 0);
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena1, arena2, "Wrong arena used after binding");
+ dallocx(ptr, 0);
+
+ /* Switch back to arena 0. */
+ test_skip_if(have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena));
+ arena2 = 0;
+ expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
+ sizeof(arena2)), 0, "Fail to bind thread");
+ ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena2, 0, "Wrong arena used after binding");
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+
+ /* Then huge allocation should use the huge arena. */
+ ptr = mallocx(HUGE_SZ, 0);
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ expect_u_ne(arena2, 0, "Wrong arena used after binding");
+ expect_u_ne(arena1, arena2, "Wrong arena used after binding");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+TEST_BEGIN(huge_mallocx) {
+ unsigned arena1, arena2;
+ size_t sz = sizeof(unsigned);
+
+ expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ "Failed to create arena");
+ void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
+ expect_ptr_not_null(huge, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
+ sizeof(huge)), 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena1, arena2, "Wrong arena used for mallocx");
+ dallocx(huge, MALLOCX_ARENA(arena1));
+
+ void *huge2 = mallocx(HUGE_SZ, 0);
+	expect_ptr_not_null(huge2, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
+ sizeof(huge2)), 0, "Unexpected mallctl() failure");
+ expect_u_ne(arena1, arena2,
+ "Huge allocation should not come from the manual arena.");
+ expect_u_ne(arena2, 0,
+ "Huge allocation should not come from the arena 0.");
+ dallocx(huge2, 0);
+}
+TEST_END
+
+TEST_BEGIN(huge_allocation) {
+ unsigned arena1, arena2;
+
+ void *ptr = mallocx(HUGE_SZ, 0);
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
+ dallocx(ptr, 0);
+
+ ptr = mallocx(HUGE_SZ >> 1, 0);
+ expect_ptr_not_null(ptr, "Fail to allocate half huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ expect_u_ne(arena1, arena2, "Wrong arena used for half huge");
+ dallocx(ptr, 0);
+
+ ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(ptr, "Fail to allocate small size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ expect_u_ne(arena1, arena2,
+ "Huge and small should be from different arenas");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ huge_allocation,
+ huge_mallocx,
+ huge_bind_thread);
+}
diff --git a/deps/jemalloc/test/unit/inspect.c b/deps/jemalloc/test/unit/inspect.c
new file mode 100644
index 0000000..fe59e59
--- /dev/null
+++ b/deps/jemalloc/test/unit/inspect.c
@@ -0,0 +1,278 @@
+#include "test/jemalloc_test.h"
+
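+/*
+ * Invoke the given experimental.utilization node, expect EINVAL, and verify
+ * that neither the output buffer nor the output size was modified.
+ */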
+#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \
+ assert_d_eq(mallctl("experimental.utilization." node, \
+ a, b, c, d), EINVAL, "Should fail when " why_inval); \
+ assert_zu_eq(out_sz, out_sz_ref, \
+ "Output size touched when given invalid arguments"); \
+ assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \
+ "Output content touched when given invalid arguments"); \
+} while (0)
+
+#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \
+ TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
+#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \
+ TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
+
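+/*
+ * Invoke the node with valid arguments; expect success, an unchanged output
+ * size, and an output buffer that was actually written to.
+ */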
+#define TEST_UTIL_VALID(node) do { \
+ assert_d_eq(mallctl("experimental.utilization." node, \
+ out, &out_sz, in, in_sz), 0, \
+ "Should return 0 on correct arguments"); \
+ expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
+ expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
+ "Output content should be changed"); \
+} while (0)
+
+#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")
+
+#define TEST_MAX_SIZE (1 << 20)
+
+TEST_BEGIN(test_query) {
+ size_t sz;
+ /*
+ * Select some sizes that can span both small and large sizes, and are
+ * numerically unrelated to any size boundaries.
+ */
+ for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
+ sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) {
+ void *p = mallocx(sz, 0);
+ void **in = &p;
+ size_t in_sz = sizeof(const void *);
+ size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
+ void *out = mallocx(out_sz, 0);
+ void *out_ref = mallocx(out_sz, 0);
+ size_t out_sz_ref = out_sz;
+
+ assert_ptr_not_null(p,
+ "test pointer allocation failed");
+ assert_ptr_not_null(out,
+ "test output allocation failed");
+ assert_ptr_not_null(out_ref,
+ "test reference output allocation failed");
+
+#define SLABCUR_READ(out) (*(void **)out)
+#define COUNTS(out) ((size_t *)((void **)out + 1))
+#define NFREE_READ(out) COUNTS(out)[0]
+#define NREGS_READ(out) COUNTS(out)[1]
+#define SIZE_READ(out) COUNTS(out)[2]
+#define BIN_NFREE_READ(out) COUNTS(out)[3]
+#define BIN_NREGS_READ(out) COUNTS(out)[4]
+
+ SLABCUR_READ(out) = NULL;
+ NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
+ BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
+ memcpy(out_ref, out, out_sz);
+
+ /* Test invalid argument(s) errors */
+ TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz,
+ "old is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz,
+ "oldlenp is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz,
+ "newp is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0,
+ "newlen is zero");
+ in_sz -= 1;
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
+ "invalid newlen");
+ in_sz += 1;
+ out_sz_ref = out_sz -= 2 * sizeof(size_t);
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
+ "invalid *oldlenp");
+ out_sz_ref = out_sz += 2 * sizeof(size_t);
+
+ /* Examine output for valid call */
+ TEST_UTIL_VALID("query");
+ expect_zu_le(sz, SIZE_READ(out),
+ "Extent size should be at least allocation size");
+ expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
+ "Extent size should be a multiple of page size");
+
+ /*
+ * We don't do much bin checking if prof is on, since profiling
+ * can produce extents that are for small size classes but not
+ * slabs, which interferes with things like region counts.
+ */
+ if (!opt_prof && sz <= SC_SMALL_MAXCLASS) {
+ expect_zu_le(NFREE_READ(out), NREGS_READ(out),
+ "Extent free count exceeded region count");
+ expect_zu_le(NREGS_READ(out), SIZE_READ(out),
+ "Extent region count exceeded size");
+ expect_zu_ne(NREGS_READ(out), 0,
+ "Extent region count must be positive");
+ expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
+ != NULL && SLABCUR_READ(out) <= p),
+ "Allocation should follow first fit principle");
+
+ if (config_stats) {
+ expect_zu_le(BIN_NFREE_READ(out),
+ BIN_NREGS_READ(out),
+ "Bin free count exceeded region count");
+ expect_zu_ne(BIN_NREGS_READ(out), 0,
+ "Bin region count must be positive");
+ expect_zu_le(NFREE_READ(out),
+ BIN_NFREE_READ(out),
+ "Extent free count exceeded bin free count");
+ expect_zu_le(NREGS_READ(out),
+ BIN_NREGS_READ(out),
+ "Extent region count exceeded "
+ "bin region count");
+ expect_zu_eq(BIN_NREGS_READ(out)
+ % NREGS_READ(out), 0,
+ "Bin region count isn't a multiple of "
+ "extent region count");
+ expect_zu_le(
+ BIN_NFREE_READ(out) - NFREE_READ(out),
+ BIN_NREGS_READ(out) - NREGS_READ(out),
+ "Free count in other extents in the bin "
+ "exceeded region count in other extents "
+ "in the bin");
+ expect_zu_le(NREGS_READ(out) - NFREE_READ(out),
+ BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
+ "Extent utilized count exceeded "
+ "bin utilized count");
+ }
+ } else if (sz > SC_SMALL_MAXCLASS) {
+ expect_zu_eq(NFREE_READ(out), 0,
+ "Extent free count should be zero");
+ expect_zu_eq(NREGS_READ(out), 1,
+ "Extent region count should be one");
+ expect_ptr_null(SLABCUR_READ(out),
+ "Current slab must be null for large size classes");
+ if (config_stats) {
+ expect_zu_eq(BIN_NFREE_READ(out), 0,
+ "Bin free count must be zero for "
+ "large sizes");
+ expect_zu_eq(BIN_NREGS_READ(out), 0,
+ "Bin region count must be zero for "
+ "large sizes");
+ }
+ }
+
+#undef BIN_NREGS_READ
+#undef BIN_NFREE_READ
+#undef SIZE_READ
+#undef NREGS_READ
+#undef NFREE_READ
+#undef COUNTS
+#undef SLABCUR_READ
+
+ free(out_ref);
+ free(out);
+ free(p);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_batch) {
+ size_t sz;
+ /*
+ * Select some sizes that can span both small and large sizes, and are
+ * numerically unrelated to any size boundaries.
+ */
+ for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
+ sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) {
+ void *p = mallocx(sz, 0);
+ void *q = mallocx(sz, 0);
+ void *in[] = {p, q};
+ size_t in_sz = sizeof(const void *) * 2;
+ size_t out[] = {-1, -1, -1, -1, -1, -1};
+ size_t out_sz = sizeof(size_t) * 6;
+ size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
+ size_t out_sz_ref = out_sz;
+
+ assert_ptr_not_null(p, "test pointer allocation failed");
+ assert_ptr_not_null(q, "test pointer allocation failed");
+
+ /* Test invalid argument(s) errors */
+ TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
+ "old is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz,
+ "oldlenp is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz,
+ "newp is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0,
+ "newlen is zero");
+ in_sz -= 1;
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "newlen is not an exact multiple");
+ in_sz += 1;
+ out_sz_ref = out_sz -= 2 * sizeof(size_t);
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "*oldlenp is not an exact multiple");
+ out_sz_ref = out_sz += 2 * sizeof(size_t);
+ in_sz -= sizeof(const void *);
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "*oldlenp and newlen do not match");
+ in_sz += sizeof(const void *);
+
+ /* Examine output for valid calls */
+#define TEST_EQUAL_REF(i, message) \
+	assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3,		\
+	    3 * sizeof(size_t)), 0, message)
+
+#define NFREE_READ(out, i) out[(i) * 3]
+#define NREGS_READ(out, i) out[(i) * 3 + 1]
+#define SIZE_READ(out, i) out[(i) * 3 + 2]
+
+ out_sz_ref = out_sz /= 2;
+ in_sz /= 2;
+ TEST_UTIL_BATCH_VALID;
+ expect_zu_le(sz, SIZE_READ(out, 0),
+ "Extent size should be at least allocation size");
+ expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
+ "Extent size should be a multiple of page size");
+ /*
+ * See the corresponding comment in test_query; profiling breaks
+ * our slab count expectations.
+ */
+ if (sz <= SC_SMALL_MAXCLASS && !opt_prof) {
+ expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
+ "Extent free count exceeded region count");
+ expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
+ "Extent region count exceeded size");
+ expect_zu_ne(NREGS_READ(out, 0), 0,
+ "Extent region count must be positive");
+ } else if (sz > SC_SMALL_MAXCLASS) {
+ expect_zu_eq(NFREE_READ(out, 0), 0,
+ "Extent free count should be zero");
+ expect_zu_eq(NREGS_READ(out, 0), 1,
+ "Extent region count should be one");
+ }
+ TEST_EQUAL_REF(1,
+ "Should not overwrite content beyond what's needed");
+ in_sz *= 2;
+ out_sz_ref = out_sz *= 2;
+
+ memcpy(out_ref, out, 3 * sizeof(size_t));
+ TEST_UTIL_BATCH_VALID;
+ TEST_EQUAL_REF(0, "Statistics should be stable across calls");
+ if (sz <= SC_SMALL_MAXCLASS) {
+ expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
+ "Extent free count exceeded region count");
+ } else {
+ expect_zu_eq(NFREE_READ(out, 0), 0,
+ "Extent free count should be zero");
+ }
+ expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
+ "Extent region count should be same for same region size");
+ expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
+ "Extent size should be same for same region size");
+
+#undef SIZE_READ
+#undef NREGS_READ
+#undef NFREE_READ
+
+#undef TEST_EQUAL_REF
+
+ free(q);
+ free(p);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ assert_zu_lt(SC_SMALL_MAXCLASS + 100000, TEST_MAX_SIZE,
+ "Test case cannot cover large classes");
+ return test(test_query, test_batch);
+}
diff --git a/deps/jemalloc/test/unit/inspect.sh b/deps/jemalloc/test/unit/inspect.sh
new file mode 100644
index 0000000..352d110
--- /dev/null
+++ b/deps/jemalloc/test/unit/inspect.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:false"
+fi
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
new file mode 100644
index 0000000..543092f
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk.c
@@ -0,0 +1,195 @@
+#include "test/jemalloc_test.h"
+
+#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
+static size_t ptr_ind;
+static void *volatile ptrs[100];
+static void *last_junked_ptr;
+static size_t last_junked_usize;
+
+static void
+reset() {
+ ptr_ind = 0;
+ last_junked_ptr = NULL;
+ last_junked_usize = 0;
+}
+
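+/* Junk callback (installed in main()); records the last junked region. */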
+static void
+test_junk(void *ptr, size_t usize) {
+ last_junked_ptr = ptr;
+ last_junked_usize = usize;
+}
+
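+/*
+ * Allocate `size` bytes through each allocation entry point; when junking on
+ * alloc is enabled and zeroing is not requested, check that the full usable
+ * size was junked.
+ */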
+static void
+do_allocs(size_t size, bool zero, size_t lg_align) {
+#define JUNK_ALLOC(...) \
+ do { \
+ assert(ptr_ind + 1 < arraylen(ptrs)); \
+ void *ptr = __VA_ARGS__; \
+ assert_ptr_not_null(ptr, ""); \
+ ptrs[ptr_ind++] = ptr; \
+ if (opt_junk_alloc && !zero) { \
+ expect_ptr_eq(ptr, last_junked_ptr, ""); \
+ expect_zu_eq(last_junked_usize, \
+ TEST_MALLOC_SIZE(ptr), ""); \
+ } \
+ } while (0)
+ if (!zero && lg_align == 0) {
+ JUNK_ALLOC(malloc(size));
+ }
+ if (!zero) {
+ JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
+ }
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+ if (!zero) {
+ JUNK_ALLOC(je_memalign(1 << lg_align, size));
+ }
+#endif
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+ if (!zero && lg_align == LG_PAGE) {
+ JUNK_ALLOC(je_valloc(size));
+ }
+#endif
+ int zero_flag = zero ? MALLOCX_ZERO : 0;
+ JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
+ JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
+ | MALLOCX_TCACHE_NONE));
+ if (lg_align >= LG_SIZEOF_PTR) {
+ void *memalign_result;
+ int err = posix_memalign(&memalign_result, (1 << lg_align),
+ size);
+ assert_d_eq(err, 0, "");
+ JUNK_ALLOC(memalign_result);
+ }
+}
+
+TEST_BEGIN(test_junk_alloc_free) {
+ bool zerovals[] = {false, true};
+ size_t sizevals[] = {
+ 1, 8, 100, 1000, 100*1000
+ /*
+ * Memory allocation failure is a real possibility in 32-bit mode.
+ * Rather than try to check in the face of resource exhaustion, we just
+ * rely more on the 64-bit tests. This is a little bit white-box-y in
+ * the sense that this is only a good test strategy if we know that the
+ * junk pathways don't touch interact with the allocation selection
+ * mechanisms; but this is in fact the case.
+ */
+#if LG_SIZEOF_PTR == 3
+ , 10 * 1000 * 1000
+#endif
+ };
+ size_t lg_alignvals[] = {
+ 0, 4, 10, 15, 16, LG_PAGE
+#if LG_SIZEOF_PTR == 3
+ , 20, 24
+#endif
+ };
+
+#define JUNK_FREE(...) \
+ do { \
+ do_allocs(size, zero, lg_align); \
+ for (size_t n = 0; n < ptr_ind; n++) { \
+ void *ptr = ptrs[n]; \
+ __VA_ARGS__; \
+ if (opt_junk_free) { \
+ assert_ptr_eq(ptr, last_junked_ptr, \
+ ""); \
+ assert_zu_eq(usize, last_junked_usize, \
+ ""); \
+ } \
+ reset(); \
+ } \
+ } while (0)
+ for (size_t i = 0; i < arraylen(zerovals); i++) {
+ for (size_t j = 0; j < arraylen(sizevals); j++) {
+ for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
+ bool zero = zerovals[i];
+ size_t size = sizevals[j];
+ size_t lg_align = lg_alignvals[k];
+ size_t usize = nallocx(size,
+ MALLOCX_LG_ALIGN(lg_align));
+
+ JUNK_FREE(free(ptr));
+ JUNK_FREE(dallocx(ptr, 0));
+ JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
+ JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
+ lg_align)));
+ JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
+ lg_align)));
+ JUNK_FREE(sdallocx(ptr, usize,
+ MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
+ if (opt_zero_realloc_action
+ == zero_realloc_action_free) {
+ JUNK_FREE(realloc(ptr, 0));
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_realloc_expand) {
+ char *volatile ptr;
+ char *volatile expanded;
+
+ test_skip_if(!opt_junk_alloc);
+
+ /* Realloc */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = realloc(ptr, SC_LARGE_MINCLASS);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., 0) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., nonzero) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., MALLOCX_ZERO) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ last_junked_ptr = (void *)-1;
+ last_junked_usize = (size_t)-1;
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
+ expect_ptr_eq(last_junked_ptr, (void *)-1, "");
+ expect_zu_eq(last_junked_usize, (size_t)-1, "");
+ free(expanded);
+
+ /*
+ * Unfortunately, testing xallocx reliably is difficult to do portably
+ * (since allocations can be expanded / not expanded differently on
+	 * different platforms). We rely on manual inspection there -- the
+ * xallocx pathway is easy to inspect, though.
+ *
+ * Likewise, we don't test the shrinking pathways. It's difficult to do
+ * so consistently (because of the risk of split failure or memory
+ * exhaustion, in which case no junking should happen). This is fine
+ * -- junking is a best-effort debug mechanism in the first place.
+ */
+}
+TEST_END
+
+int
+main(void) {
+ junk_alloc_callback = &test_junk;
+ junk_free_callback = &test_junk;
+ /*
+ * We check the last pointer junked. If a reentrant call happens, that
+ * might be an internal allocation.
+ */
+ return test_no_reentrancy(
+ test_junk_alloc_free,
+ test_realloc_expand);
+}
diff --git a/deps/jemalloc/test/unit/junk.sh b/deps/jemalloc/test/unit/junk.sh
new file mode 100644
index 0000000..97cd8ca
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:true"
+fi
diff --git a/deps/jemalloc/test/unit/junk_alloc.c b/deps/jemalloc/test/unit/junk_alloc.c
new file mode 100644
index 0000000..a442a0c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_alloc.c
@@ -0,0 +1 @@
+#include "junk.c"
diff --git a/deps/jemalloc/test/unit/junk_alloc.sh b/deps/jemalloc/test/unit/junk_alloc.sh
new file mode 100644
index 0000000..e1008c2
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_alloc.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:alloc"
+fi
diff --git a/deps/jemalloc/test/unit/junk_free.c b/deps/jemalloc/test/unit/junk_free.c
new file mode 100644
index 0000000..a442a0c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_free.c
@@ -0,0 +1 @@
+#include "junk.c"
diff --git a/deps/jemalloc/test/unit/junk_free.sh b/deps/jemalloc/test/unit/junk_free.sh
new file mode 100644
index 0000000..402196c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_free.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:free"
+fi
diff --git a/deps/jemalloc/test/unit/log.c b/deps/jemalloc/test/unit/log.c
new file mode 100644
index 0000000..c09b589
--- /dev/null
+++ b/deps/jemalloc/test/unit/log.c
@@ -0,0 +1,198 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/log.h"
+
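+/* Overwrite the global log_var_names filter that enables/disables log vars. */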
+static void
+update_log_var_names(const char *names) {
+ strncpy(log_var_names, names, sizeof(log_var_names));
+}
+
+static void
+expect_no_logging(const char *names) {
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l2 = LOG_VAR_INIT("l2");
+ log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
+
+ update_log_var_names(names);
+
+ int count = 0;
+
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l2)
+ count++;
+ log_do_end(log_l2)
+
+ log_do_begin(log_l2_a)
+ count++;
+ log_do_end(log_l2_a)
+ }
+ expect_d_eq(count, 0, "Disabled logging not ignored!");
+}
+
+TEST_BEGIN(test_log_disabled) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ expect_no_logging("");
+ expect_no_logging("abc");
+ expect_no_logging("a.b.c");
+ expect_no_logging("l12");
+ expect_no_logging("l123|a456|b789");
+ expect_no_logging("|||");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_direct) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
+ log_var_t log_l2 = LOG_VAR_INIT("l2");
+
+ int count;
+
+ count = 0;
+ update_log_var_names("l1");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+ }
+ expect_d_eq(count, 10, "Mis-logged!");
+
+ count = 0;
+ update_log_var_names("l1.a");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+ }
+ expect_d_eq(count, 10, "Mis-logged!");
+
+ count = 0;
+ update_log_var_names("l1.a|abc|l2|def");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+
+ log_do_begin(log_l2)
+ count++;
+ log_do_end(log_l2)
+ }
+ expect_d_eq(count, 20, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_indirect) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ update_log_var_names("l0|l1|abc|l2.b|def");
+
+ /* On. */
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ /* Off. */
+ log_var_t log_l1a = LOG_VAR_INIT("l1a");
+ /* On. */
+ log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
+ /* Off. */
+ log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
+ /* On. */
+ log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a");
+ /* On. */
+ log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b");
+
+ /* 4 are on total, so should sum to 40. */
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l1a)
+ count++;
+ log_do_end(log_l1a)
+
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+
+ log_do_begin(log_l2_a)
+ count++;
+ log_do_end(log_l2_a)
+
+ log_do_begin(log_l2_b_a)
+ count++;
+ log_do_end(log_l2_b_a)
+
+ log_do_begin(log_l2_b_b)
+ count++;
+ log_do_end(log_l2_b_b)
+ }
+
+ expect_d_eq(count, 40, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_global) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ update_log_var_names("abc|.|def");
+
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a");
+
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l2_a_a)
+ count++;
+ log_do_end(log_l2_a_a)
+ }
+ expect_d_eq(count, 20, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_logs_if_no_init) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, false, ATOMIC_RELAXED);
+
+ log_var_t l = LOG_VAR_INIT("definitely.not.enabled");
+
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(l)
+ count++;
+ log_do_end(l)
+ }
+ expect_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
+}
+TEST_END
+
+/*
+ * This really just checks to make sure that this usage compiles; we don't have
+ * any test code to run.
+ */
+TEST_BEGIN(test_log_only_format_string) {
+ if (false) {
+ LOG("log_str", "No arguments follow this format string.");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_log_disabled,
+ test_log_enabled_direct,
+ test_log_enabled_indirect,
+ test_log_enabled_global,
+ test_logs_if_no_init,
+ test_log_only_format_string);
+}
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
new file mode 100644
index 0000000..6efc8f1
--- /dev/null
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -0,0 +1,1274 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/hook.h"
+#include "jemalloc/internal/util.h"
+
+TEST_BEGIN(test_mallctl_errors) {
+ uint64_t epoch;
+ size_t sz;
+
+ expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
+ "mallctl() should return ENOENT for non-existent names");
+
+ expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
+ EPERM, "mallctl() should return EPERM on attempt to write "
+ "read-only value");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctl() should return EINVAL for input size mismatch");
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctl() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_errors) {
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
+ "mallctlnametomib() should return ENOENT for non-existent names");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymib_errors) {
+ uint64_t epoch;
+ size_t sz;
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
+ strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
+ "attempt to write read-only value");
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_read_write) {
+ uint64_t old_epoch, new_epoch;
+ size_t sz = sizeof(old_epoch);
+
+ /* Blind. */
+ expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read. */
+ expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Write. */
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
+ sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read+write. */
+ expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
+ (void *)&new_epoch, sizeof(new_epoch)), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_short_mib) {
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 3;
+ mib[3] = 42;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ expect_zu_eq(miblen, 3, "Unexpected mib output length");
+ expect_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_short_name) {
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 4;
+ mib[3] = 42;
+ expect_d_eq(mallctlnametomib("arenas.bin.0", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ expect_zu_eq(miblen, 3, "Unexpected mib output length");
+ expect_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlmibnametomib) {
+ size_t mib[4];
+ size_t miblen = 4;
+ uint32_t result, result_ref;
+ size_t len_result = sizeof(uint32_t);
+
+ tsd_t *tsd = tsd_fetch();
+
+ /* Error cases */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen), 0, "");
+ assert_zu_eq(miblen, 1, "");
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 1, "bin", &miblen), 0, "");
+ assert_zu_eq(miblen, 2, "");
+ expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ ENOENT, "mallctlbymib() should fail on partial path");
+
+ /* Error cases. */
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "0", &miblen), 0, "");
+ assert_zu_eq(miblen, 3, "");
+ expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ ENOENT, "mallctlbymib() should fail on partial path");
+
+ /* Error cases. */
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ 0, "Unexpected mallctlbymib() failure");
+ assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ expect_zu_eq(result, result_ref,
+ "mallctlbymib() and mallctl() returned different result");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymibname) {
+ size_t mib[4];
+ size_t miblen = 4;
+ uint32_t result, result_ref;
+ size_t len_result = sizeof(uint32_t);
+
+ tsd_t *tsd = tsd_fetch();
+
+ /* Error cases. */
+
+ assert_d_eq(mallctlnametomib("arenas", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ assert_zu_eq(miblen, 1, "");
+
+ miblen = 4;
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen,
+ &result, &len_result, NULL, 0), ENOENT, "");
+ miblen = 4;
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen,
+ &result, &len_result, NULL, 0), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid cases. */
+
+ assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ miblen = 4;
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen,
+ &result, &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_config) {
+#define TEST_MALLCTL_CONFIG(config, t) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+ expect_b_eq(oldval, config_##config, "Incorrect config value"); \
+ expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_CONFIG(cache_oblivious, bool);
+ TEST_MALLCTL_CONFIG(debug, bool);
+ TEST_MALLCTL_CONFIG(fill, bool);
+ TEST_MALLCTL_CONFIG(lazy_lock, bool);
+ TEST_MALLCTL_CONFIG(malloc_conf, const char *);
+ TEST_MALLCTL_CONFIG(prof, bool);
+ TEST_MALLCTL_CONFIG(prof_libgcc, bool);
+ TEST_MALLCTL_CONFIG(prof_libunwind, bool);
+ TEST_MALLCTL_CONFIG(stats, bool);
+ TEST_MALLCTL_CONFIG(utrace, bool);
+ TEST_MALLCTL_CONFIG(xmalloc, bool);
+
+#undef TEST_MALLCTL_CONFIG
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_opt) {
+ bool config_always = true;
+
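+/*
+ * Reading opt.<opt> should succeed iff the controlling config_<config> flag
+ * is set; "always" uses the local config_always above.
+ */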
+#define TEST_MALLCTL_OPT(t, opt, config) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ int expected = config_##config ? 0 : ENOENT; \
+ int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
+ 0); \
+ expect_d_eq(result, expected, \
+ "Unexpected mallctl() result for opt."#opt); \
+ expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_OPT(bool, abort, always);
+ TEST_MALLCTL_OPT(bool, abort_conf, always);
+ TEST_MALLCTL_OPT(bool, cache_oblivious, always);
+ TEST_MALLCTL_OPT(bool, trust_madvise, always);
+ TEST_MALLCTL_OPT(bool, confirm_conf, always);
+ TEST_MALLCTL_OPT(const char *, metadata_thp, always);
+ TEST_MALLCTL_OPT(bool, retain, always);
+ TEST_MALLCTL_OPT(const char *, dss, always);
+ TEST_MALLCTL_OPT(bool, hpa, always);
+ TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
+ TEST_MALLCTL_OPT(unsigned, narenas, always);
+ TEST_MALLCTL_OPT(const char *, percpu_arena, always);
+ TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
+ TEST_MALLCTL_OPT(bool, background_thread, always);
+ TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
+ TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
+ TEST_MALLCTL_OPT(bool, stats_print, always);
+ TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
+ TEST_MALLCTL_OPT(int64_t, stats_interval, always);
+ TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
+ TEST_MALLCTL_OPT(const char *, junk, fill);
+ TEST_MALLCTL_OPT(bool, zero, fill);
+ TEST_MALLCTL_OPT(bool, utrace, utrace);
+ TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
+ TEST_MALLCTL_OPT(bool, tcache, always);
+ TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
+ TEST_MALLCTL_OPT(size_t, tcache_max, always);
+ TEST_MALLCTL_OPT(const char *, thp, always);
+ TEST_MALLCTL_OPT(const char *, zero_realloc, always);
+ TEST_MALLCTL_OPT(bool, prof, prof);
+ TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
+ TEST_MALLCTL_OPT(bool, prof_active, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
+ TEST_MALLCTL_OPT(bool, prof_accum, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
+ TEST_MALLCTL_OPT(bool, prof_gdump, prof);
+ TEST_MALLCTL_OPT(bool, prof_final, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak_error, prof);
+ TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
+ TEST_MALLCTL_OPT(bool, prof_stats, prof);
+ TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
+
+#undef TEST_MALLCTL_OPT
+}
+TEST_END
+
+TEST_BEGIN(test_manpage_example) {
+ unsigned nbins, i;
+ size_t mib[4];
+ size_t len, miblen;
+
+ len = sizeof(nbins);
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+ /* Do something with bin_size... */
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_tcache_none) {
+ test_skip_if(!opt_tcache);
+
+ /* Allocate p and q. */
+ void *p0 = mallocx(42, 0);
+ expect_ptr_not_null(p0, "Unexpected mallocx() failure");
+ void *q = mallocx(42, 0);
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+
+ /* Deallocate p and q, but bypass the tcache for q. */
+ dallocx(p0, 0);
+ dallocx(q, MALLOCX_TCACHE_NONE);
+
+ /* Make sure that tcache-based allocation returns p, not q. */
+ void *p1 = mallocx(42, 0);
+ expect_ptr_not_null(p1, "Unexpected mallocx() failure");
+ if (!opt_prof && !san_uaf_detection_enabled()) {
+ expect_ptr_eq(p0, p1,
+ "Expected tcache to allocate cached region");
+ }
+
+ /* Clean up. */
+ dallocx(p1, MALLOCX_TCACHE_NONE);
+}
+TEST_END
+
+TEST_BEGIN(test_tcache) {
+#define NTCACHES 10
+ unsigned tis[NTCACHES];
+ void *ps[NTCACHES];
+ void *qs[NTCACHES];
+ unsigned i;
+ size_t sz, psz, qsz;
+
+ psz = 42;
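+ /* One byte past psz's usable size, so ps and qs land in different bins. */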
+ qsz = nallocx(psz, 0) + 1;
+
+ /* Create tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Exercise tcache ID recycling. */
+ for (i = 0; i < NTCACHES; i++) {
+ expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ (void *)&tis[i], sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Flush empty tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Cache some allocations. */
+ for (i = 0; i < NTCACHES; i++) {
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
+
+ qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
+ expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Verify that tcaches allocate cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *p0 = ps[i];
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
+ "allocate cached region, i=%u", i);
+ }
+ }
+
+ /* Verify that reallocation uses cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *q0 = qs[i];
+ qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
+ expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+ i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
+ "allocate cached region, i=%u", i);
+ }
+ /* Avoid undefined behavior in case of test failure. */
+ if (qs[i] == NULL) {
+ qs[i] = ps[i];
+ }
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Flush some non-empty tcaches. */
+ for (i = 0; i < NTCACHES/2; i++) {
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Destroy tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ (void *)&tis[i], sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_thread_arena) {
+ unsigned old_arena_ind, new_arena_ind, narenas;
+
+ const char *opa;
+ size_t sz = sizeof(opa);
+ expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
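+ /* A nonzero oversize_threshold adds a dedicated arena beyond opt_narenas. */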
+ if (opt_oversize_threshold != 0) {
+ narenas--;
+ }
+ expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
+
+ if (strcmp(opa, "disabled") == 0) {
+ new_arena_ind = narenas - 1;
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&new_arena_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure");
+ new_arena_ind = 0;
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&new_arena_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure");
+ } else {
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
+ if (old_arena_ind != new_arena_ind) {
+ expect_d_eq(mallctl("thread.arena",
+ (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
+ sizeof(unsigned)), EPERM, "thread.arena ctl "
+ "should not be allowed with percpu arena");
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_initialized) {
+ unsigned narenas, i;
+ size_t sz;
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ bool initialized;
+
+ sz = sizeof(narenas);
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < narenas; i++) {
+ mib[1] = i;
+ sz = sizeof(initialized);
+ expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ }
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ sz = sizeof(initialized);
+ expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_true(initialized,
+ "Merged arena statistics should always be initialized");
+
+ /* Equivalent to the above but using mallctl() directly. */
+ sz = sizeof(initialized);
+ expect_d_eq(mallctl(
+ "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
+ (void *)&initialized, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_true(initialized,
+ "Merged arena statistics should always be initialized");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dirty_decay_ms) {
+ ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms",
+ (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ dirty_decay_ms = -2;
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ dirty_decay_ms = 0x7fffffff;
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
+ dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
+ dirty_decay_ms++) {
+ ssize_t old_dirty_decay_ms;
+
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms",
+ (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ "Unexpected old arena.0.dirty_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
+ ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ muzzy_decay_ms = -2;
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ muzzy_decay_ms = 0x7fffffff;
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
+ muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
+ muzzy_decay_ms++) {
+ ssize_t old_muzzy_decay_ms;
+
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ "Unexpected old arena.0.muzzy_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_purge) {
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_decay) {
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dss) {
+ const char *dss_prec_old, *dss_prec_new;
+ size_t sz = sizeof(dss_prec_old);
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+
+ dss_prec_new = "disabled";
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+ "Unexpected mallctl() failure");
+ expect_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
+ "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ expect_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+
+ mib[1] = narenas_total_get();
+ dss_prec_new = "disabled";
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+ "Unexpected mallctl() failure");
+ expect_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
+ "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ expect_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_retain_grow_limit) {
+ size_t old_limit, new_limit, default_limit;
+ size_t mib[3];
+ size_t miblen;
+
+ bool retain_enabled;
+ size_t sz = sizeof(retain_enabled);
+ expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ test_skip_if(!retain_enabled);
+
+ sz = sizeof(default_limit);
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() error");
+
+ expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
+ "Unexpected default for retain_grow_limit");
+
+ new_limit = PAGE - 1;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
+
+ new_limit = PAGE + 1;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(old_limit, PAGE,
+ "Unexpected value for retain_grow_limit");
+
+ /* Setting just below psize class 10 should round the limit down to class 9. */
+ new_limit = sz_pind2sz(10) - 1;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_zu_eq(old_limit, sz_pind2sz(9),
+ "Unexpected value for retain_grow_limit");
+
+ /* Restore to default. */
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
+ sizeof(default_limit)), 0, "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_dirty_decay_ms) {
+ ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ expect_d_eq(mallctl("arenas.dirty_decay_ms",
+ (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ dirty_decay_ms = -2;
+ expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ dirty_decay_ms = 0x7fffffff;
+ expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
+ dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
+ dirty_decay_ms++) {
+ ssize_t old_dirty_decay_ms;
+
+ expect_d_eq(mallctl("arenas.dirty_decay_ms",
+ (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ "Unexpected old arenas.dirty_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_muzzy_decay_ms) {
+ ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms",
+ (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ muzzy_decay_ms = -2;
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ muzzy_decay_ms = 0x7fffffff;
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
+ muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
+ muzzy_decay_ms++) {
+ ssize_t old_muzzy_decay_ms;
+
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms",
+ (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ "Unexpected old arenas.muzzy_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_constants) {
+#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
+ TEST_ARENAS_CONSTANT(size_t, page, PAGE);
+ TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
+ TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
+
+#undef TEST_ARENAS_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_bin_constants) {
+#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
+ TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
+ bin_infos[0].slab_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
+
+#undef TEST_ARENAS_BIN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lextent_constants) {
+#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
+ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
+ SC_LARGE_MINCLASS);
+
+#undef TEST_ARENAS_LEXTENT_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_create) {
+ unsigned narenas_before, arena, narenas_after;
+ size_t sz = sizeof(unsigned);
+
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ expect_u_eq(narenas_before+1, narenas_after,
+ "Unexpected number of arenas before versus after extension");
+ expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lookup) {
+ unsigned arena, arena1;
+ void *ptr;
+ size_t sz = sizeof(unsigned);
+
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
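+ /* Bypass the tcache so the allocation comes straight from the new arena. */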
+ ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena, arena1, "Unexpected arena index");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_prof_active) {
+ /*
+ * If config_prof is off, then the test for prof_active in
+ * test_mallctl_opt was already enough.
+ */
+ test_skip_if(!config_prof);
+ test_skip_if(opt_prof);
+
+ bool active, old;
+ size_t len = sizeof(bool);
+
+ active = true;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
+ "Setting prof_active to true should fail when opt_prof is off");
+ old = true;
+ expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
+ "Setting prof_active to true should fail when opt_prof is off");
+ expect_true(old, "old value should not be touched when mallctl fails");
+ active = false;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
+ "Setting prof_active to false should succeed when opt_prof is off");
+ expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
+ "Setting prof_active to false should succeed when opt_prof is off");
+ expect_false(old, "prof_active should be false when opt_prof is off");
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas) {
+#define TEST_STATS_ARENAS(t, name) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+} while (0)
+
+ TEST_STATS_ARENAS(unsigned, nthreads);
+ TEST_STATS_ARENAS(const char *, dss);
+ TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
+ TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
+ TEST_STATS_ARENAS(size_t, pactive);
+ TEST_STATS_ARENAS(size_t, pdirty);
+
+#undef TEST_STATS_ARENAS
+}
+TEST_END
+
+static void
+alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
+ UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
+ *(bool *)extra = true;
+}
+
+static void
+dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
+ UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
+ *(bool *)extra = true;
+}
+
+TEST_BEGIN(test_hooks) {
+ bool hook_called = false;
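+ /* alloc hook, dalloc hook, no expand hook; &hook_called is the extra arg. */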
+ hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
+ void *handle = NULL;
+ size_t sz = sizeof(handle);
+ int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ expect_d_eq(err, 0, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
+ void *ptr = mallocx(1, 0);
+ expect_true(hook_called, "Alloc hook not called");
+ hook_called = false;
+ free(ptr);
+ expect_true(hook_called, "Free hook not called");
+
+ err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
+ sizeof(handle));
+ expect_d_eq(err, 0, "Hook removal failed");
+ hook_called = false;
+ ptr = mallocx(1, 0);
+ free(ptr);
+ expect_false(hook_called, "Hook called after removal");
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_exhaustion) {
+ bool hook_called = false;
+ hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
+
+ void *handle;
+ void *handles[HOOK_MAX];
+ size_t sz = sizeof(handle);
+ int err;
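+ /* Fill every hook slot; the next install should then fail with EAGAIN. */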
+ for (int i = 0; i < HOOK_MAX; i++) {
+ handle = NULL;
+ err = mallctl("experimental.hooks.install", &handle, &sz,
+ &hooks, sizeof(hooks));
+ expect_d_eq(err, 0, "Error installing hooks");
+ expect_ptr_ne(handle, NULL, "Got NULL handle");
+ handles[i] = handle;
+ }
+ err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ expect_d_eq(err, EAGAIN, "Should have failed hook installation");
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.remove", NULL, NULL,
+ &handles[i], sizeof(handles[i]));
+ expect_d_eq(err, 0, "Hook removal failed");
+ }
+ /* Insertion failed, but then we removed some; it should work now. */
+ handle = NULL;
+ err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ expect_d_eq(err, 0, "Hook insertion failed");
+ expect_ptr_ne(handle, NULL, "Got NULL handle");
+ err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
+ sizeof(handle));
+ expect_d_eq(err, 0, "Hook removal failed");
+}
+TEST_END
+
+TEST_BEGIN(test_thread_idle) {
+ /*
+ * We're cheating a little bit in this test, and inferring things about
+ * implementation internals (like tcache details). We have to;
+ * thread.idle has no guaranteed effects. We need stats to make these
+ * inferences.
+ */
+ test_skip_if(!config_stats);
+
+ int err;
+ size_t sz;
+ size_t miblen;
+
+ bool tcache_enabled = false;
+ sz = sizeof(tcache_enabled);
+ err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ test_skip_if(!tcache_enabled);
+
+ size_t tcache_max;
+ sz = sizeof(tcache_max);
+ err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ test_skip_if(tcache_max == 0);
+
+ unsigned arena_ind;
+ sz = sizeof(arena_ind);
+ err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ /* We're going to do an allocation of size 1, which we know is small. */
+ size_t mib[5];
+ miblen = sizeof(mib)/sizeof(mib[0]);
+ err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
+ expect_d_eq(err, 0, "");
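+ /* Point the stats MIB at this thread's arena. */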
+ mib[2] = arena_ind;
+
+ /*
+ * This alloc and dalloc should leave something in the tcache, in a
+ * small size's cache bin.
+ */
+ void *ptr = mallocx(1, 0);
+ dallocx(ptr, 0);
+
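+ /* Refresh stats so the pre-idle ndalloc reading below is current. */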
+ uint64_t epoch;
+ err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
+ expect_d_eq(err, 0, "");
+
+ uint64_t small_dalloc_pre_idle;
+ sz = sizeof(small_dalloc_pre_idle);
+ err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ err = mallctl("thread.idle", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
+ expect_d_eq(err, 0, "");
+
+ uint64_t small_dalloc_post_idle;
+ sz = sizeof(small_dalloc_post_idle);
+ err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
+ "thread.idle didn't flush the tcache");
+}
+TEST_END
+
+TEST_BEGIN(test_thread_peak) {
+ test_skip_if(!config_stats);
+
+ /*
+ * We don't commit to any stable amount of accuracy for peak tracking
+ * (in practice, when this test was written, we made sure to be within
+ * 100k). But 10MB is big for more or less any definition of big.
+ */
+ size_t big_size = 10 * 1024 * 1024;
+ size_t small_size = 256;
+
+ void *ptr;
+ int err;
+ size_t sz;
+ uint64_t peak;
+ sz = sizeof(uint64_t);
+
+ err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+ ptr = mallocx(SC_SMALL_MAXCLASS, 0);
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Missed an update");
+ free(ptr);
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Freeing changed peak");
+ ptr = mallocx(big_size, 0);
+ free(ptr);
+ /*
+ * The peak should have hit big_size in the last two lines, even though
+ * the net allocated byte count has since dropped back down to zero. We
+ * should have noticed the peak change without having made any mallctl
+ * calls while the count was high.
+ */
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_ge(peak, big_size, "Missed a peak change.");
+
+ /* Allocate big_size, but using small allocations. */
+ size_t nallocs = big_size / small_size;
+ void **ptrs = calloc(nallocs, sizeof(void *));
+ err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(0, peak, "Missed a reset.");
+ for (size_t i = 0; i < nallocs; i++) {
+ ptrs[i] = mallocx(small_size, 0);
+ }
+ for (size_t i = 0; i < nallocs; i++) {
+ free(ptrs[i]);
+ }
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ /*
+ * We don't guarantee exactness; make sure we're within 10% of the peak,
+ * though.
+ */
+ expect_u64_ge(peak, nallocx(small_size, 0) * nallocs * 9 / 10,
+ "Missed some peak changes.");
+ expect_u64_le(peak, nallocx(small_size, 0) * nallocs * 11 / 10,
+ "Overcounted peak changes.");
+ free(ptrs);
+}
+TEST_END
+
+typedef struct activity_test_data_s activity_test_data_t;
+struct activity_test_data_s {
+ uint64_t obtained_alloc;
+ uint64_t obtained_dalloc;
+};
+
+static void
+activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
+ activity_test_data_t *test_data = (activity_test_data_t *)uctx;
+ test_data->obtained_alloc = alloc;
+ test_data->obtained_dalloc = dalloc;
+}
+
+TEST_BEGIN(test_thread_activity_callback) {
+ test_skip_if(!config_stats);
+
+ const size_t big_size = 10 * 1024 * 1024;
+ void *ptr;
+ int err;
+ size_t sz;
+
+ uint64_t *allocatedp;
+ uint64_t *deallocatedp;
+ sz = sizeof(allocatedp);
+ err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
+ assert_d_eq(0, err, "");
+ err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
+ assert_d_eq(0, err, "");
+
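+ /* Poison old_thunk so we can tell the read-back actually happened. */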
+ activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
+ (void *)222};
+
+ activity_test_data_t test_data = {333, 444};
+ activity_callback_thunk_t new_thunk =
+ {&activity_test_callback, &test_data};
+
+ sz = sizeof(old_thunk);
+ err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+ &new_thunk, sizeof(new_thunk));
+ assert_d_eq(0, err, "");
+
+ expect_true(old_thunk.callback == NULL, "Callback already installed");
+ expect_true(old_thunk.uctx == NULL, "Callback data already installed");
+
+ ptr = mallocx(big_size, 0);
+ expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+ expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+ free(ptr);
+ expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+ expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+ sz = sizeof(old_thunk);
+ new_thunk = (activity_callback_thunk_t){ NULL, NULL };
+ err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+ &new_thunk, sizeof(new_thunk));
+ assert_d_eq(0, err, "");
+
+ expect_true(old_thunk.callback == &activity_test_callback, "");
+ expect_true(old_thunk.uctx == &test_data, "");
+
+ /* Inserting NULL should have turned off tracking. */
+ test_data.obtained_alloc = 333;
+ test_data.obtained_dalloc = 444;
+ ptr = mallocx(big_size, 0);
+ free(ptr);
+ expect_u64_eq(333, test_data.obtained_alloc, "");
+ expect_u64_eq(444, test_data.obtained_dalloc, "");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mallctl_errors,
+ test_mallctlnametomib_errors,
+ test_mallctlbymib_errors,
+ test_mallctl_read_write,
+ test_mallctlnametomib_short_mib,
+ test_mallctlnametomib_short_name,
+ test_mallctlmibnametomib,
+ test_mallctlbymibname,
+ test_mallctl_config,
+ test_mallctl_opt,
+ test_manpage_example,
+ test_tcache_none,
+ test_tcache,
+ test_thread_arena,
+ test_arena_i_initialized,
+ test_arena_i_dirty_decay_ms,
+ test_arena_i_muzzy_decay_ms,
+ test_arena_i_purge,
+ test_arena_i_decay,
+ test_arena_i_dss,
+ test_arena_i_retain_grow_limit,
+ test_arenas_dirty_decay_ms,
+ test_arenas_muzzy_decay_ms,
+ test_arenas_constants,
+ test_arenas_bin_constants,
+ test_arenas_lextent_constants,
+ test_arenas_create,
+ test_arenas_lookup,
+ test_prof_active,
+ test_stats_arenas,
+ test_hooks,
+ test_hooks_exhaustion,
+ test_thread_idle,
+ test_thread_peak,
+ test_thread_activity_callback);
+}
diff --git a/deps/jemalloc/test/unit/malloc_conf_2.c b/deps/jemalloc/test/unit/malloc_conf_2.c
new file mode 100644
index 0000000..ecfa499
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_conf_2.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "dirty_decay_ms:1000";
+const char *malloc_conf_2_conf_harder = "dirty_decay_ms:1234";
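+/*
+ * malloc_conf_2_conf_harder overrides both malloc_conf above and the
+ * MALLOC_CONF environment setting from malloc_conf_2.sh, hence 1234 below.
+ */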
+
+TEST_BEGIN(test_malloc_conf_2) {
+#ifdef _WIN32
+ bool windows = true;
+#else
+ bool windows = false;
+#endif
+ /* Windows doesn't support weak symbol linker trickery. */
+ test_skip_if(windows);
+
+ ssize_t dirty_decay_ms;
+ size_t sz = sizeof(dirty_decay_ms);
+
+ int err = mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0);
+ assert_d_eq(err, 0, "Unexpected mallctl failure");
+ expect_zd_eq(dirty_decay_ms, 1234,
+ "malloc_conf_2 setting didn't take effect");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_malloc_conf_2);
+}
diff --git a/deps/jemalloc/test/unit/malloc_conf_2.sh b/deps/jemalloc/test/unit/malloc_conf_2.sh
new file mode 100644
index 0000000..2c780f1
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_conf_2.sh
@@ -0,0 +1 @@
+export MALLOC_CONF="dirty_decay_ms:500"
diff --git a/deps/jemalloc/test/unit/malloc_io.c b/deps/jemalloc/test/unit/malloc_io.c
new file mode 100644
index 0000000..385f745
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_io.c
@@ -0,0 +1,268 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
+ int err;
+
+ set_errno(0);
+ expect_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ err = get_errno();
+ expect_d_eq(err, 0, "Unexpected failure");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax) {
+ struct test_s {
+ const char *input;
+ const char *expected_remainder;
+ int base;
+ int expected_errno;
+ const char *expected_errno_name;
+ uintmax_t expected_x;
+ };
+#define ERR(e) e, #e
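+/*
+ * KUMAX/KSMAX cast literals to uintmax_t; KSMAX goes through intmax_t so
+ * negative inputs compare against their wrapped encodings.
+ */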
+#define KUMAX(x) ((uintmax_t)x##ULL)
+#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)
+ struct test_s tests[] = {
+ {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
+
+ {"", "", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {"+42", "", 0, ERR(0), KUMAX(42)},
+ {"-42", "", 0, ERR(0), KSMAX(-42)},
+ {"042", "", 0, ERR(0), KUMAX(042)},
+ {"+042", "", 0, ERR(0), KUMAX(042)},
+ {"-042", "", 0, ERR(0), KSMAX(-042)},
+ {"0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
+
+ {"0", "", 0, ERR(0), KUMAX(0)},
+ {"1", "", 0, ERR(0), KUMAX(1)},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {" 42", "", 0, ERR(0), KUMAX(42)},
+ {"42 ", " ", 0, ERR(0), KUMAX(42)},
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"42x", "x", 0, ERR(0), KUMAX(42)},
+
+ {"07", "", 0, ERR(0), KUMAX(7)},
+ {"010", "", 0, ERR(0), KUMAX(8)},
+ {"08", "8", 0, ERR(0), KUMAX(0)},
+ {"0_", "_", 0, ERR(0), KUMAX(0)},
+
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"0X", "X", 0, ERR(0), KUMAX(0)},
+ {"0xg", "xg", 0, ERR(0), KUMAX(0)},
+ {"0XA", "", 0, ERR(0), KUMAX(10)},
+
+ {"010", "", 10, ERR(0), KUMAX(10)},
+ {"0x3", "x3", 10, ERR(0), KUMAX(0)},
+
+ {"12", "2", 2, ERR(0), KUMAX(1)},
+ {"78", "8", 8, ERR(0), KUMAX(7)},
+ {"9a", "a", 10, ERR(0), KUMAX(9)},
+ {"9A", "A", 10, ERR(0), KUMAX(9)},
+ {"fg", "g", 16, ERR(0), KUMAX(15)},
+ {"FG", "G", 16, ERR(0), KUMAX(15)},
+ {"0xfg", "g", 16, ERR(0), KUMAX(15)},
+ {"0XFG", "G", 16, ERR(0), KUMAX(15)},
+ {"z_", "_", 36, ERR(0), KUMAX(35)},
+ {"Z_", "_", 36, ERR(0), KUMAX(35)}
+ };
+#undef ERR
+#undef KUMAX
+#undef KSMAX
+ unsigned i;
+
+ for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
+ struct test_s *test = &tests[i];
+ int err;
+ uintmax_t result;
+ char *remainder;
+
+ set_errno(0);
+ result = malloc_strtoumax(test->input, &remainder, test->base);
+ err = get_errno();
+ expect_d_eq(err, test->expected_errno,
+ "Expected errno %s for \"%s\", base %d",
+ test->expected_errno_name, test->input, test->base);
+ expect_str_eq(remainder, test->expected_remainder,
+ "Unexpected remainder for \"%s\", base %d",
+ test->input, test->base);
+ if (err == 0) {
+ expect_ju_eq(result, test->expected_x,
+ "Unexpected result for \"%s\", base %d",
+ test->input, test->base);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf_truncated) {
+#define BUFLEN 15
+ char buf[BUFLEN];
+ size_t result;
+ size_t len;
+#define TEST(expected_str_untruncated, ...) do { \
+ result = malloc_snprintf(buf, len, __VA_ARGS__); \
+ expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
+ "Unexpected string inequality (\"%s\" vs \"%s\")", \
+ buf, expected_str_untruncated); \
+ expect_zu_eq(result, strlen(expected_str_untruncated), \
+ "Unexpected result"); \
+} while (0)
+
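+ /*
+ * For each buffer length, the written prefix must match the expected
+ * string and the return value must be the full untruncated length.
+ */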
+ for (len = 1; len < BUFLEN; len++) {
+ TEST("012346789", "012346789");
+ TEST("a0123b", "a%sb", "0123");
+ TEST("a01234567", "a%s%s", "0123", "4567");
+ TEST("a0123 ", "a%-6s", "0123");
+ TEST("a 0123", "a%6s", "0123");
+ TEST("a 012", "a%6.3s", "0123");
+ TEST("a 012", "a%*.*s", 6, 3, "0123");
+ TEST("a 123b", "a% db", 123);
+ TEST("a123b", "a%-db", 123);
+ TEST("a-123b", "a%-db", -123);
+ TEST("a+123b", "a%+db", 123);
+ }
+#undef BUFLEN
+#undef TEST
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf) {
+#define BUFLEN 128
+ char buf[BUFLEN];
+ size_t result;
+#define TEST(expected_str, ...) do { \
+ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
+ expect_str_eq(buf, expected_str, "Unexpected output"); \
+ expect_zu_eq(result, strlen(expected_str), "Unexpected result");\
+} while (0)
+
+ TEST("hello", "hello");
+
+ TEST("50%, 100%", "50%%, %d%%", 100);
+
+ TEST("a0123b", "a%sb", "0123");
+
+ TEST("a 0123b", "a%5sb", "0123");
+ TEST("a 0123b", "a%*sb", 5, "0123");
+
+ TEST("a0123 b", "a%-5sb", "0123");
+ TEST("a0123b", "a%*sb", -1, "0123");
+ TEST("a0123 b", "a%*sb", -5, "0123");
+ TEST("a0123 b", "a%-*sb", -5, "0123");
+
+ TEST("a012b", "a%.3sb", "0123");
+ TEST("a012b", "a%.*sb", 3, "0123");
+ TEST("a0123b", "a%.*sb", -3, "0123");
+
+ TEST("a 012b", "a%5.3sb", "0123");
+ TEST("a 012b", "a%5.*sb", 3, "0123");
+ TEST("a 012b", "a%*.3sb", 5, "0123");
+ TEST("a 012b", "a%*.*sb", 5, 3, "0123");
+ TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
+
+ TEST("_abcd_", "_%x_", 0xabcd);
+ TEST("_0xabcd_", "_%#x_", 0xabcd);
+ TEST("_1234_", "_%o_", 01234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+ TEST("01234", "%05u", 1234);
+
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_ 1234_", "_% d_", 1234);
+ TEST("_+1234_", "_%+d_", 1234);
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_-1234_", "_% d_", -1234);
+ TEST("_-1234_", "_%+d_", -1234);
+
+ /*
+ * Morally, we should test these too, but 0-padded signed types are not
+ * yet supported.
+ *
+ * TEST("01234", "%05d", 1234);
+ * TEST("-1234", "%05d", -1234);
+ * TEST("-01234", "%06d", -1234);
+ */
+
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_-1234_", "_%i_", -1234);
+ TEST("_1234_", "_%i_", 1234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+ TEST("_0x1234abc_", "_%#x_", 0x1234abc);
+ TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
+ TEST("_c_", "_%c_", 'c');
+ TEST("_string_", "_%s_", "string");
+ TEST("_0x42_", "_%p_", ((void *)0x42));
+
+ TEST("_-1234_", "_%ld_", ((long)-1234));
+ TEST("_1234_", "_%ld_", ((long)1234));
+ TEST("_-1234_", "_%li_", ((long)-1234));
+ TEST("_1234_", "_%li_", ((long)1234));
+ TEST("_01234_", "_%#lo_", ((long)01234));
+ TEST("_1234_", "_%lu_", ((long)1234));
+ TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
+
+ TEST("_-1234_", "_%lld_", ((long long)-1234));
+ TEST("_1234_", "_%lld_", ((long long)1234));
+ TEST("_-1234_", "_%lli_", ((long long)-1234));
+ TEST("_1234_", "_%lli_", ((long long)1234));
+ TEST("_01234_", "_%#llo_", ((long long)01234));
+ TEST("_1234_", "_%llu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%qd_", ((long long)-1234));
+ TEST("_1234_", "_%qd_", ((long long)1234));
+ TEST("_-1234_", "_%qi_", ((long long)-1234));
+ TEST("_1234_", "_%qi_", ((long long)1234));
+ TEST("_01234_", "_%#qo_", ((long long)01234));
+ TEST("_1234_", "_%qu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
+ TEST("_1234_", "_%jd_", ((intmax_t)1234));
+ TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
+ TEST("_1234_", "_%ji_", ((intmax_t)1234));
+ TEST("_01234_", "_%#jo_", ((intmax_t)01234));
+ TEST("_1234_", "_%ju_", ((intmax_t)1234));
+ TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
+
+ TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
+ TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
+
+ TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zd_", ((ssize_t)1234));
+ TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zi_", ((ssize_t)1234));
+ TEST("_01234_", "_%#zo_", ((ssize_t)01234));
+ TEST("_1234_", "_%zu_", ((ssize_t)1234));
+ TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
+#undef BUFLEN
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_malloc_strtoumax_no_endptr,
+ test_malloc_strtoumax,
+ test_malloc_snprintf_truncated,
+ test_malloc_snprintf);
+}
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
new file mode 100644
index 0000000..a32767c
--- /dev/null
+++ b/deps/jemalloc/test/unit/math.c
@@ -0,0 +1,390 @@
+#include "test/jemalloc_test.h"
+
+#define MAX_REL_ERR 1.0e-9
+#define MAX_ABS_ERR 1.0e-9
+
+#include <float.h>
+
+#ifdef __PGI
+#undef INFINITY
+#endif
+
+#ifndef INFINITY
+#define INFINITY (DBL_MAX + DBL_MAX)
+#endif
+
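+/* Approximate equality: absolute tolerance for tiny values, relative otherwise. */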
+static bool
+double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
+ double rel_err;
+
+ if (fabs(a - b) < max_abs_err) {
+ return true;
+ }
+ rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
+ return (rel_err < max_rel_err);
+}
+
+static uint64_t
+factorial(unsigned x) {
+ uint64_t ret = 1;
+ unsigned i;
+
+ for (i = 2; i <= x; i++) {
+ ret *= (uint64_t)i;
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_ln_gamma_factorial) {
+ unsigned x;
+
+ /* exp(ln_gamma(x)) == (x-1)! for integer x. */
+ for (x = 1; x <= 21; x++) {
+ expect_true(double_eq_rel(exp(ln_gamma(x)),
+ (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect factorial result for x=%u", x);
+ }
+}
+TEST_END
+
+/* Expected ln_gamma([0.0..100.0] increment=0.25). */
+static const double ln_gamma_misc_expected[] = {
+ INFINITY,
+ 1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
+ 0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
+ -0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
+ 0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
+ 0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
+ 1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
+ 2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
+ 3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
+ 5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
+ 6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
+ 8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
+ 9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
+ 11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
+ 12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
+ 14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
+ 16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
+ 18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
+ 19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
+ 21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
+ 23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
+ 25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
+ 27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
+ 29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
+ 32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
+ 34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
+ 36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
+ 38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
+ 40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
+ 43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
+ 45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
+ 47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
+ 50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
+ 52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
+ 54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
+ 57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
+ 59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
+ 62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
+ 64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
+ 67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
+ 69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
+ 72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
+ 74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
+ 77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
+ 79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
+ 82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
+ 85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
+ 87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
+ 90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
+ 93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
+ 95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
+ 98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
+ 101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
+ 103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
+ 106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
+ 109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
+ 112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
+ 114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
+ 117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
+ 120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
+ 123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
+ 126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
+ 129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
+ 131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
+ 134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
+ 137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
+ 140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
+ 143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
+ 146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
+ 149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
+ 152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
+ 155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
+ 158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
+ 161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
+ 164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
+ 167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
+ 170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
+ 173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
+ 176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
+ 179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
+ 182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
+ 185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
+ 188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
+ 191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
+ 194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
+ 197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
+ 201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
+ 204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
+ 207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
+ 210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
+ 213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
+ 216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
+ 219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
+ 223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
+ 226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
+ 229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
+ 232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
+ 235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
+ 238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
+ 242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
+ 245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
+ 248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
+ 251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
+ 255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
+ 258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
+ 261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
+ 264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
+ 268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
+ 271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
+ 274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
+ 278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
+ 281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
+ 284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
+ 287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
+ 291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
+ 294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
+ 297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
+ 301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
+ 304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
+ 308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
+ 311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
+ 314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
+ 318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
+ 321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
+ 324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
+ 328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
+ 331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
+ 335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
+ 338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
+ 341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
+ 345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
+ 348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
+ 352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
+ 355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
+ 359.13420536957539753
+};
+
+TEST_BEGIN(test_ln_gamma_misc) {
+ unsigned i;
+
+ for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
+ double x = (double)i * 0.25;
+ expect_true(double_eq_rel(ln_gamma(x),
+ ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect ln_gamma result for i=%u", i);
+ }
+}
+TEST_END
+
+/* Expected pt_norm([0.01..0.99] increment=0.01). */
+static const double pt_norm_expected[] = {
+ -INFINITY,
+ -2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
+ -1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
+ -1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
+ -1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
+ -1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
+ -0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
+ -0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
+ -0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
+ -0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
+ -0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
+ -0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
+ -0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
+ -0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
+ -0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
+ -0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
+ -0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
+ -0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
+ 0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
+ 0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
+ 0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
+ 0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
+ 0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
+ 0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
+ 0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
+ 0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
+ 0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
+ 0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
+ 0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
+ 1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
+ 1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
+ 1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
+ 1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
+ 1.88079360815125041, 2.05374891063182208, 2.32634787404084076
+};
+
+TEST_BEGIN(test_pt_norm) {
+ unsigned i;
+
+ for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
+ double p = (double)i * 0.01;
+ expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
+ MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_norm result for i=%u", i);
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
+ * df={0.1, 1.1, 10.1, 100.1, 1000.1}).
+ */
+static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
+static const double pt_chi2_expected[] = {
+ 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
+ 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
+ 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
+ 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
+ 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
+
+ 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
+ 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
+ 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
+ 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
+ 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
+
+ 2.606673548632508, 4.602913725294877, 5.646152813924212,
+ 6.488971315540869, 7.249823275816285, 7.977314231410841,
+ 8.700354939944047, 9.441728024225892, 10.224338321374127,
+ 11.076435368801061, 12.039320937038386, 13.183878752697167,
+ 14.657791935084575, 16.885728216339373, 23.361991680031817,
+
+ 70.14844087392152, 80.92379498849355, 85.53325420085891,
+ 88.94433120715347, 91.83732712857017, 94.46719943606301,
+ 96.96896479994635, 99.43412843510363, 101.94074719829733,
+ 104.57228644307247, 107.43900093448734, 110.71844673417287,
+ 114.76616819871325, 120.57422505959563, 135.92318818757556,
+
+ 899.0072447849649, 937.9271278858220, 953.8117189560207,
+ 965.3079371501154, 974.8974061207954, 983.4936235182347,
+ 991.5691170518946, 999.4334123954690, 1007.3391826856553,
+ 1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
+ 1046.4872561869577, 1063.5717461999654, 1107.0741966053859
+};
+
+TEST_BEGIN(test_pt_chi2) {
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
+ double df = pt_chi2_df[i];
+ double ln_gamma_df = ln_gamma(df * 0.5);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
+ pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_gamma(p=[0.1..0.99] increment=0.07,
+ * shape=[0.5..3.0] increment=0.5).
+ */
+static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
+static const double pt_gamma_expected[] = {
+ 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
+ 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
+ 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
+ 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
+ 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
+
+ 0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
+ 0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
+ 0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
+ 1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
+ 1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
+
+ 0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
+ 0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
+ 1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
+ 1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
+ 2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
+
+ 0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
+ 0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
+ 1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
+ 2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
+ 3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
+
+ 0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
+ 1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
+ 1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
+ 2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
+ 4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
+
+ 0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
+ 1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
+ 2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
+ 3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
+ 4.7230515633946677, 5.6417477865306020, 8.4059469148854635
+};
+
+TEST_BEGIN(test_pt_gamma_shape) {
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
+ double shape = pt_gamma_shape[i];
+ double ln_gamma_shape = ln_gamma(shape);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ expect_true(double_eq_rel(pt_gamma(p, shape, 1.0,
+ ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Incorrect pt_gamma result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_pt_gamma_scale) {
+ double shape = 1.0;
+ double ln_gamma_shape = ln_gamma(shape);
+
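+ /*
+ * Gamma quantiles scale linearly with the scale parameter: if X is
+ * Gamma(shape, 1) distributed, then s * X is Gamma(shape, s)
+ * distributed, so pt_gamma(p, shape, s) == s * pt_gamma(p, shape, 1).
+ */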
+ expect_true(double_eq_rel(
+ pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
+ pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Scale should be trivially equivalent to external multiplication");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ln_gamma_factorial,
+ test_ln_gamma_misc,
+ test_pt_norm,
+ test_pt_chi2,
+ test_pt_gamma_shape,
+ test_pt_gamma_scale);
+}
diff --git a/deps/jemalloc/test/unit/mpsc_queue.c b/deps/jemalloc/test/unit/mpsc_queue.c
new file mode 100644
index 0000000..895edf8
--- /dev/null
+++ b/deps/jemalloc/test/unit/mpsc_queue.c
@@ -0,0 +1,304 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/mpsc_queue.h"
+
+typedef struct elem_s elem_t;
+typedef ql_head(elem_t) elem_list_t;
+typedef mpsc_queue(elem_t) elem_mpsc_queue_t;
+struct elem_s {
+ int thread;
+ int idx;
+ ql_elm(elem_t) link;
+};
+
+/* Include both proto and gen to make sure they match up. */
+mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
+ elem_list_t);
+mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
+ elem_list_t, link);
+
+static void
+init_elems_simple(elem_t *elems, int nelems, int thread) {
+ for (int i = 0; i < nelems; i++) {
+ elems[i].thread = thread;
+ elems[i].idx = i;
+ ql_elm_new(&elems[i], link);
+ }
+}
+
+static void
+check_elems_simple(elem_list_t *list, int nelems, int thread) {
+ elem_t *elem;
+ int next_idx = 0;
+ ql_foreach(elem, list, link) {
+ expect_d_lt(next_idx, nelems, "Too many list items");
+ expect_d_eq(thread, elem->thread, "");
+ expect_d_eq(next_idx, elem->idx, "List out of order");
+ next_idx++;
+ }
+}
+
+TEST_BEGIN(test_simple) {
+ enum {NELEMS = 10};
+ elem_t elems[NELEMS];
+ elem_list_t list;
+ elem_mpsc_queue_t queue;
+
+ /* Pop empty queue onto empty list -> empty list */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ expect_true(ql_empty(&list), "");
+
+ /* Pop empty queue onto nonempty list -> list unchanged */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS; i++) {
+ ql_tail_insert(&list, &elems[i], link);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+ /* Pop nonempty queue onto empty list -> list takes queue contents */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[i]);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+ /* Pop nonempty queue onto nonempty list -> list gains queue contents */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS / 2; i++) {
+ ql_tail_insert(&list, &elems[i], link);
+ }
+ for (int i = NELEMS / 2; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[i]);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+}
+TEST_END
+
+TEST_BEGIN(test_push_single_or_batch) {
+ enum {
+ BATCH_MAX = 10,
+ /*
+ * We'll push i items one-at-a-time, then i items as a batch,
+ * then i items as a batch again, as i ranges from 1 to
+ * BATCH_MAX - 1. So we need 3 times the sum of the numbers from
+ * 1 to BATCH_MAX - 1 elements total; for BATCH_MAX = 10 that is
+ * 3 * 45 = 135.
+ */
+ NELEMS = 3 * BATCH_MAX * (BATCH_MAX - 1) / 2
+ };
+ elem_t elems[NELEMS];
+ init_elems_simple(elems, NELEMS, 0);
+ elem_list_t list;
+ ql_new(&list);
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+ int next_idx = 0;
+ for (int i = 1; i < BATCH_MAX; i++) {
+ /* Push i items 1 at a time. */
+ for (int j = 0; j < i; j++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Push i items in batch. */
+ for (int j = 0; j < i; j++) {
+ ql_tail_insert(&list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &list);
+ expect_true(ql_empty(&list), "Batch push should empty source");
+ /*
+ * Push i items in batch, again. This tests two batches
+ * proceeding one after the other.
+ */
+ for (int j = 0; j < i; j++) {
+ ql_tail_insert(&list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &list);
+ expect_true(ql_empty(&list), "Batch push should empty source");
+ }
+ expect_d_eq(NELEMS, next_idx, "Miscomputed number of elems to push.");
+
+ expect_true(ql_empty(&list), "");
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_multi_op) {
+ enum {NELEMS = 20};
+ elem_t elems[NELEMS];
+ init_elems_simple(elems, NELEMS, 0);
+ elem_list_t push_list;
+ ql_new(&push_list);
+ elem_list_t result_list;
+ ql_new(&result_list);
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+
+ int next_idx = 0;
+ /* Push first quarter 1-at-a-time. */
+ for (int i = 0; i < NELEMS / 4; i++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Push second quarter in batch. */
+ for (int i = NELEMS / 4; i < NELEMS / 2; i++) {
+ ql_tail_insert(&push_list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &push_list);
+ /* Batch pop all pushed elements. */
+ elem_mpsc_queue_pop_batch(&queue, &result_list);
+ /* Push third quarter in batch. */
+ for (int i = NELEMS / 2; i < 3 * NELEMS / 4; i++) {
+ ql_tail_insert(&push_list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &push_list);
+ /* Push last quarter one-at-a-time. */
+ for (int i = 3 * NELEMS / 4; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Pop them again. Order of existing list should be preserved. */
+ elem_mpsc_queue_pop_batch(&queue, &result_list);
+
+ check_elems_simple(&result_list, NELEMS, 0);
+
+}
+TEST_END
+
+typedef struct pusher_arg_s pusher_arg_t;
+struct pusher_arg_s {
+ elem_mpsc_queue_t *queue;
+ int thread;
+ elem_t *elems;
+ int nelems;
+};
+
+typedef struct popper_arg_s popper_arg_t;
+struct popper_arg_s {
+ elem_mpsc_queue_t *queue;
+ int npushers;
+ int nelems_per_pusher;
+ int *pusher_counts;
+};
+
+static void *
+thd_pusher(void *void_arg) {
+ pusher_arg_t *arg = (pusher_arg_t *)void_arg;
+ int next_idx = 0;
+ while (next_idx < arg->nelems) {
+ /* Push 10 items in batch. */
+ elem_list_t list;
+ ql_new(&list);
+ int limit = next_idx + 10;
+ while (next_idx < arg->nelems && next_idx < limit) {
+ ql_tail_insert(&list, &arg->elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(arg->queue, &list);
+ /* Push 10 items one-at-a-time. */
+ limit = next_idx + 10;
+ while (next_idx < arg->nelems && next_idx < limit) {
+ elem_mpsc_queue_push(arg->queue, &arg->elems[next_idx]);
+ next_idx++;
+ }
+
+ }
+ return NULL;
+}
+
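+/*
+ * The single consumer repeatedly batch-pops and checks that each pusher's
+ * elements arrive in increasing idx order: the queue may interleave elements
+ * from different pushers, but each pusher's own order must be preserved. It
+ * exits once every pusher's full element count has been observed.
+ */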
+static void *
+thd_popper(void *void_arg) {
+ popper_arg_t *arg = (popper_arg_t *)void_arg;
+ int done_pushers = 0;
+ while (done_pushers < arg->npushers) {
+ elem_list_t list;
+ ql_new(&list);
+ elem_mpsc_queue_pop_batch(arg->queue, &list);
+ elem_t *elem;
+ ql_foreach(elem, &list, link) {
+ int thread = elem->thread;
+ int idx = elem->idx;
+ expect_d_eq(arg->pusher_counts[thread], idx,
+ "Thread's pushes reordered");
+ arg->pusher_counts[thread]++;
+ if (arg->pusher_counts[thread]
+ == arg->nelems_per_pusher) {
+ done_pushers++;
+ }
+ }
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_multiple_threads) {
+ enum {
+ NPUSHERS = 4,
+ NELEMS_PER_PUSHER = 1000*1000,
+ };
+ thd_t pushers[NPUSHERS];
+ pusher_arg_t pusher_arg[NPUSHERS];
+
+ thd_t popper;
+ popper_arg_t popper_arg;
+
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+
+ elem_t *elems = calloc(NPUSHERS * NELEMS_PER_PUSHER, sizeof(elem_t));
+ elem_t *elem_iter = elems;
+ for (int i = 0; i < NPUSHERS; i++) {
+ pusher_arg[i].queue = &queue;
+ pusher_arg[i].thread = i;
+ pusher_arg[i].elems = elem_iter;
+ pusher_arg[i].nelems = NELEMS_PER_PUSHER;
+
+ init_elems_simple(elem_iter, NELEMS_PER_PUSHER, i);
+ elem_iter += NELEMS_PER_PUSHER;
+ }
+ popper_arg.queue = &queue;
+ popper_arg.npushers = NPUSHERS;
+ popper_arg.nelems_per_pusher = NELEMS_PER_PUSHER;
+ int pusher_counts[NPUSHERS] = {0};
+ popper_arg.pusher_counts = pusher_counts;
+
+ thd_create(&popper, thd_popper, (void *)&popper_arg);
+ for (int i = 0; i < NPUSHERS; i++) {
+ thd_create(&pushers[i], thd_pusher, &pusher_arg[i]);
+ }
+
+ thd_join(popper, NULL);
+ for (int i = 0; i < NPUSHERS; i++) {
+ thd_join(pushers[i], NULL);
+ }
+
+ for (int i = 0; i < NPUSHERS; i++) {
+ expect_d_eq(NELEMS_PER_PUSHER, pusher_counts[i], "");
+ }
+
+ free(elems);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_simple,
+ test_push_single_or_batch,
+ test_multi_op,
+ test_multiple_threads);
+}
diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c
new file mode 100644
index 0000000..f833f77
--- /dev/null
+++ b/deps/jemalloc/test/unit/mq.c
@@ -0,0 +1,89 @@
+#include "test/jemalloc_test.h"
+
+#define NSENDERS 3
+#define NMSGS 100000
+
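+/*
+ * mq_gen() expands into a complete message-queue implementation (mq_t plus
+ * the mq_init/mq_count/mq_tryget/mq_get/mq_put/mq_fini functions exercised
+ * below), with messages linked through the mq_msg field of mq_msg_t.
+ */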
+typedef struct mq_msg_s mq_msg_t;
+struct mq_msg_s {
+ mq_msg(mq_msg_t) link;
+};
+mq_gen(static, mq_, mq_t, mq_msg_t, link)
+
+TEST_BEGIN(test_mq_basic) {
+ mq_t mq;
+ mq_msg_t msg;
+
+ expect_false(mq_init(&mq), "Unexpected mq_init() failure");
+ expect_u_eq(mq_count(&mq), 0, "mq should be empty");
+ expect_ptr_null(mq_tryget(&mq),
+ "mq_tryget() should fail when the queue is empty");
+
+ mq_put(&mq, &msg);
+ expect_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ expect_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+
+ mq_put(&mq, &msg);
+ expect_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+
+ mq_fini(&mq);
+}
+TEST_END
+
+static void *
+thd_receiver_start(void *arg) {
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < (NSENDERS * NMSGS); i++) {
+ mq_msg_t *msg = mq_get(mq);
+ expect_ptr_not_null(msg, "mq_get() should never return NULL");
+ dallocx(msg, 0);
+ }
+ return NULL;
+}
+
+static void *
+thd_sender_start(void *arg) {
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < NMSGS; i++) {
+ mq_msg_t *msg;
+ void *p;
+ p = mallocx(sizeof(mq_msg_t), 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ msg = (mq_msg_t *)p;
+ mq_put(mq, msg);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_mq_threaded) {
+ mq_t mq;
+ thd_t receiver;
+ thd_t senders[NSENDERS];
+ unsigned i;
+
+ expect_false(mq_init(&mq), "Unexpected mq_init() failure");
+
+ thd_create(&receiver, thd_receiver_start, (void *)&mq);
+ for (i = 0; i < NSENDERS; i++) {
+ thd_create(&senders[i], thd_sender_start, (void *)&mq);
+ }
+
+ thd_join(receiver, NULL);
+ for (i = 0; i < NSENDERS; i++) {
+ thd_join(senders[i], NULL);
+ }
+
+ mq_fini(&mq);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mq_basic,
+ test_mq_threaded);
+}
+
diff --git a/deps/jemalloc/test/unit/mtx.c b/deps/jemalloc/test/unit/mtx.c
new file mode 100644
index 0000000..4aeebc1
--- /dev/null
+++ b/deps/jemalloc/test/unit/mtx.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 2
+#define NINCRS 2000000
+
+TEST_BEGIN(test_mtx_basic) {
+ mtx_t mtx;
+
+ expect_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ mtx_lock(&mtx);
+ mtx_unlock(&mtx);
+ mtx_fini(&mtx);
+}
+TEST_END
+
+typedef struct {
+ mtx_t mtx;
+ unsigned x;
+} thd_start_arg_t;
+
+static void *
+thd_start(void *varg) {
+ thd_start_arg_t *arg = (thd_start_arg_t *)varg;
+ unsigned i;
+
+ for (i = 0; i < NINCRS; i++) {
+ mtx_lock(&arg->mtx);
+ arg->x++;
+ mtx_unlock(&arg->mtx);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_mtx_race) {
+ thd_start_arg_t arg;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ arg.x = 0;
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start, (void *)&arg);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+ expect_u_eq(arg.x, NTHREADS * NINCRS,
+ "Race-related counter corruption");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mtx_basic,
+ test_mtx_race);
+}
diff --git a/deps/jemalloc/test/unit/nstime.c b/deps/jemalloc/test/unit/nstime.c
new file mode 100644
index 0000000..56238ab
--- /dev/null
+++ b/deps/jemalloc/test/unit/nstime.c
@@ -0,0 +1,252 @@
+#include "test/jemalloc_test.h"
+
+#define BILLION UINT64_C(1000000000)
+
+TEST_BEGIN(test_nstime_init) {
+ nstime_t nst;
+
+ nstime_init(&nst, 42000000043);
+ expect_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
+ expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_init2) {
+ nstime_t nst;
+
+ nstime_init2(&nst, 42, 43);
+ expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_copy) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init_zero(&nstb);
+ nstime_copy(&nstb, &nsta);
+ expect_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
+ expect_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_compare) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
+ expect_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
+
+ nstime_init2(&nstb, 42, 42);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ expect_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 42, 44);
+ expect_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ expect_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+
+ nstime_init2(&nstb, 41, BILLION - 1);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ expect_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 43, 0);
+ expect_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ expect_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_add) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 84, 86);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+
+ nstime_init2(&nsta, 42, BILLION - 1);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 85, BILLION - 2);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_iadd) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, BILLION - 1);
+ nstime_iadd(&nsta, 1);
+ nstime_init2(&nstb, 43, 0);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+
+ nstime_init2(&nsta, 42, 1);
+ nstime_iadd(&nsta, BILLION + 1);
+ nstime_init2(&nstb, 43, 2);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_subtract) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init_zero(&nstb);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init2(&nstb, 41, 44);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init2(&nstb, 0, BILLION - 1);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_isubtract) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_isubtract(&nsta, 42*BILLION + 43);
+ nstime_init_zero(&nstb);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_isubtract(&nsta, 41*BILLION + 44);
+ nstime_init2(&nstb, 0, BILLION - 1);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_imultiply) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_imultiply(&nsta, 10);
+ nstime_init2(&nstb, 420, 430);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_imultiply(&nsta, 3);
+ nstime_init2(&nstb, 127, 999999998);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_idivide) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_idivide(&nsta, 10);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 3);
+ nstime_idivide(&nsta, 3);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_divide) {
+ nstime_t nsta, nstb, nstc;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_add(&nsta, &nstc);
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_subtract(&nsta, &nstc);
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 9,
+ "Incorrect division result");
+}
+TEST_END
+
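+/*
+ * ns_since is sampled before nstime_update(), so the post-update delta
+ * (new_t - old_t) should be at least as large as the value ns_since
+ * reported; the helper checks exactly that bound.
+ */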
+void
+test_nstime_since_once(nstime_t *t) {
+ nstime_t old_t;
+ nstime_copy(&old_t, t);
+
+ uint64_t ns_since = nstime_ns_since(t);
+ nstime_update(t);
+
+ nstime_t new_t;
+ nstime_copy(&new_t, t);
+ nstime_subtract(&new_t, &old_t);
+
+ expect_u64_ge(nstime_ns(&new_t), ns_since,
+ "Incorrect time since result");
+}
+
+TEST_BEGIN(test_nstime_ns_since) {
+ nstime_t t;
+
+ nstime_init_update(&t);
+ for (uint64_t i = 0; i < 10000; i++) {
+ /* Keeps updating t and verifies ns_since is valid. */
+ test_nstime_since_once(&t);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_monotonic) {
+ nstime_monotonic();
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_nstime_init,
+ test_nstime_init2,
+ test_nstime_copy,
+ test_nstime_compare,
+ test_nstime_add,
+ test_nstime_iadd,
+ test_nstime_subtract,
+ test_nstime_isubtract,
+ test_nstime_imultiply,
+ test_nstime_idivide,
+ test_nstime_divide,
+ test_nstime_ns_since,
+ test_nstime_monotonic);
+}
diff --git a/deps/jemalloc/test/unit/oversize_threshold.c b/deps/jemalloc/test/unit/oversize_threshold.c
new file mode 100644
index 0000000..44a8f76
--- /dev/null
+++ b/deps/jemalloc/test/unit/oversize_threshold.c
@@ -0,0 +1,133 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ctl.h"
+
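+/*
+ * Helper that substitutes the arena index into a per-arena mallctl name
+ * (e.g. "arena.%u.purge") and asserts that the resulting mallctl() call
+ * succeeds.
+ */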
+static void
+arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
+ size_t *oldlen, void *newp, size_t newlen) {
+ int err;
+ char buf[100];
+ malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
+
+ err = mallctl(buf, oldp, oldlen, newp, newlen);
+ expect_d_eq(0, err, "Mallctl failed; %s", buf);
+}
+
+TEST_BEGIN(test_oversize_threshold_get_set) {
+ int err;
+ size_t old_threshold;
+ size_t new_threshold;
+ size_t threshold_sz = sizeof(old_threshold);
+
+ unsigned arena;
+ size_t arena_sz = sizeof(arena);
+ err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
+ expect_d_eq(0, err, "Arena creation failed");
+
+ /* Just a write. */
+ new_threshold = 1024 * 1024;
+ arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
+ &new_threshold, threshold_sz);
+
+ /* Read and write */
+ new_threshold = 2 * 1024 * 1024;
+ arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
+ &threshold_sz, &new_threshold, threshold_sz);
+ expect_zu_eq(1024 * 1024, old_threshold, "Should have read old value");
+
+ /* Just a read */
+ arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
+ &threshold_sz, NULL, 0);
+ expect_zu_eq(2 * 1024 * 1024, old_threshold, "Should have read old value");
+}
+TEST_END
+
+static size_t max_purged = 0;
+static bool
+purge_forced_record_max(extent_hooks_t* hooks, void *addr, size_t sz,
+ size_t offset, size_t length, unsigned arena_ind) {
+ if (length > max_purged) {
+ max_purged = length;
+ }
+ return false;
+}
+
+static bool
+dalloc_record_max(extent_hooks_t *extent_hooks, void *addr, size_t sz,
+ bool committed, unsigned arena_ind) {
+ if (sz > max_purged) {
+ max_purged = sz;
+ }
+ return false;
+}
+
+extent_hooks_t max_recording_extent_hooks;
+
+TEST_BEGIN(test_oversize_threshold) {
+ max_recording_extent_hooks = ehooks_default_extent_hooks;
+ max_recording_extent_hooks.purge_forced = &purge_forced_record_max;
+ max_recording_extent_hooks.dalloc = &dalloc_record_max;
+
+ extent_hooks_t *extent_hooks = &max_recording_extent_hooks;
+
+ int err;
+
+ unsigned arena;
+ size_t arena_sz = sizeof(arena);
+ err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
+ expect_d_eq(0, err, "Arena creation failed");
+ arena_mallctl("arena.%u.extent_hooks", arena, NULL, NULL, &extent_hooks,
+ sizeof(extent_hooks));
+
+ /*
+ * This test will fundamentally race with purging, since we're going to
+ * check the dirty stats to see if our oversized allocation got purged.
+ * We don't want other purging to happen accidentally. We can't just
+ * disable purging entirely, though, since that will also disable
+ * oversize purging. Just set purging intervals to be very large.
+ */
+ ssize_t decay_ms = 100 * 1000;
+ ssize_t decay_ms_sz = sizeof(decay_ms);
+ arena_mallctl("arena.%u.dirty_decay_ms", arena, NULL, NULL, &decay_ms,
+ decay_ms_sz);
+ arena_mallctl("arena.%u.muzzy_decay_ms", arena, NULL, NULL, &decay_ms,
+ decay_ms_sz);
+
+ /* Clean everything out. */
+ arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
+ max_purged = 0;
+
+ /* Set threshold to 1MB. */
+ size_t threshold = 1024 * 1024;
+ size_t threshold_sz = sizeof(threshold);
+ arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
+ &threshold, threshold_sz);
+
+ /* Allocating and freeing half a megabyte should leave the pages dirty. */
+ void *ptr = mallocx(512 * 1024, MALLOCX_ARENA(arena));
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ if (!is_background_thread_enabled()) {
+ expect_zu_lt(max_purged, 512 * 1024, "Expected no 512k purge");
+ }
+
+ /* Purge again to reset everything out. */
+ arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
+ max_purged = 0;
+
+ /*
+ * Allocating and freeing 2 megabytes should have the pages purged
+ * because of the oversize threshold.
+ */
+ ptr = mallocx(2 * 1024 * 1024, MALLOCX_ARENA(arena));
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ expect_zu_ge(max_purged, 2 * 1024 * 1024, "Expected a 2MB purge");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_oversize_threshold_get_set,
+ test_oversize_threshold);
+}
+
diff --git a/deps/jemalloc/test/unit/pa.c b/deps/jemalloc/test/unit/pa.c
new file mode 100644
index 0000000..b1e2f6e
--- /dev/null
+++ b/deps/jemalloc/test/unit/pa.c
@@ -0,0 +1,126 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/pa.h"
+
+static void *
+alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ void *ret = pages_map(new_addr, size, alignment, commit);
+ return ret;
+}
+
+static bool
+merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+
+static bool
+split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+
+static void
+init_test_extent_hooks(extent_hooks_t *hooks) {
+ /*
+ * The default hooks are mostly fine for testing. A few of them,
+ * though, access globals (alloc reads the arena's dss setting; split
+ * and merge touch the global emap to find head state). The first of
+ * these could be fixed by keeping that state with the hooks, where it
+ * logically belongs. The second, though, we can only fix once we use
+ * the extent hook API.
+ */
+ memcpy(hooks, &ehooks_default_extent_hooks, sizeof(extent_hooks_t));
+ hooks->alloc = &alloc_hook;
+ hooks->merge = &merge_hook;
+ hooks->split = &split_hook;
+}
+
+typedef struct test_data_s test_data_t;
+struct test_data_s {
+ pa_shard_t shard;
+ pa_central_t central;
+ base_t *base;
+ emap_t emap;
+ pa_shard_stats_t stats;
+ malloc_mutex_t stats_mtx;
+ extent_hooks_t hooks;
+};
+
+test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+ test_data_t *test_data = calloc(1, sizeof(test_data_t));
+ assert_ptr_not_null(test_data, "");
+ init_test_extent_hooks(&test_data->hooks);
+
+ base_t *base = base_new(TSDN_NULL, /* ind */ 1, &test_data->hooks,
+ /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+
+ test_data->base = base;
+ bool err = emap_init(&test_data->emap, test_data->base,
+ /* zeroed */ true);
+ assert_false(err, "");
+
+ nstime_t time;
+ nstime_init(&time, 0);
+
+ err = pa_central_init(&test_data->central, base, opt_hpa,
+ &hpa_hooks_default);
+ assert_false(err, "");
+
+ const size_t pa_oversize_threshold = 8 * 1024 * 1024;
+ err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
+ &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
+ &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
+ muzzy_decay_ms);
+ assert_false(err, "");
+
+ return test_data;
+}
+
+void destroy_test_data(test_data_t *data) {
+ base_delete(TSDN_NULL, data->base);
+ free(data);
+}
+
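+/*
+ * Worker: repeatedly allocates a page from the shard, frees it, and then
+ * drives dirty-page decay, so several threads hammer the allocation,
+ * deallocation, and purging paths of the same pa_shard concurrently.
+ */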
+static void *
+do_alloc_free_purge(void *arg) {
+ test_data_t *test_data = (test_data_t *)arg;
+ for (int i = 0; i < 10 * 1000; i++) {
+ bool deferred_work_generated = false;
+ edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
+ PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
+ /* guarded */ false, &deferred_work_generated);
+ assert_ptr_not_null(edata, "");
+ pa_dalloc(TSDN_NULL, &test_data->shard, edata,
+ &deferred_work_generated);
+ malloc_mutex_lock(TSDN_NULL,
+ &test_data->shard.pac.decay_dirty.mtx);
+ pac_decay_all(TSDN_NULL, &test_data->shard.pac,
+ &test_data->shard.pac.decay_dirty,
+ &test_data->shard.pac.stats->decay_dirty,
+ &test_data->shard.pac.ecache_dirty, true);
+ malloc_mutex_unlock(TSDN_NULL,
+ &test_data->shard.pac.decay_dirty.mtx);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_alloc_free_purge_thds) {
+ test_data_t *test_data = init_test_data(0, 0);
+ thd_t thds[4];
+ for (int i = 0; i < 4; i++) {
+ thd_create(&thds[i], do_alloc_free_purge, test_data);
+ }
+ for (int i = 0; i < 4; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alloc_free_purge_thds);
+}
diff --git a/deps/jemalloc/test/unit/pack.c b/deps/jemalloc/test/unit/pack.c
new file mode 100644
index 0000000..e639282
--- /dev/null
+++ b/deps/jemalloc/test/unit/pack.c
@@ -0,0 +1,166 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Size class that is a divisor of the page size, ideally 4+ regions per run.
+ */
+#if LG_PAGE <= 14
+#define SZ (ZU(1) << (LG_PAGE - 2))
+#else
+#define SZ ZU(4096)
+#endif
+
+/*
+ * Number of slabs to consume at high water mark. Should be at least 2 so that
+ * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
+ * tested.
+ */
+#define NSLABS 8
+
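+/*
+ * Finds the bin index whose size class equals SZ by walking
+ * "arenas.bin.<i>.size" via mallctlbymib.
+ */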
+static unsigned
+binind_compute(void) {
+ size_t sz;
+ unsigned nbins, i;
+
+ sz = sizeof(nbins);
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ for (i = 0; i < nbins; i++) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ size_t size;
+
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
+ &miblen), 0, "Unexpected mallctlnametomb failure");
+ mib[2] = (size_t)i;
+
+ sz = sizeof(size);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
+ 0), 0, "Unexpected mallctlbymib failure");
+ if (size == SZ) {
+ return i;
+ }
+ }
+
+ test_fail("Unable to compute nregs_per_run");
+ return 0;
+}
+
+static size_t
+nregs_per_run_compute(void) {
+ uint32_t nregs;
+ size_t sz;
+ unsigned binind = binind_compute();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomb failure");
+ mib[2] = (size_t)binind;
+ sz = sizeof(nregs);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
+ 0), 0, "Unexpected mallctlbymib failure");
+ return nregs;
+}
+
+static unsigned
+arenas_create_mallctl(void) {
+ unsigned arena_ind;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Error in arenas.create");
+
+ return arena_ind;
+}
+
+static void
+arena_reset_mallctl(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+TEST_BEGIN(test_pack) {
+ bool prof_enabled;
+ size_t sz = sizeof(prof_enabled);
+ if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
+ test_skip_if(prof_enabled);
+ }
+
+ unsigned arena_ind = arenas_create_mallctl();
+ size_t nregs_per_run = nregs_per_run_compute();
+ size_t nregs = nregs_per_run * NSLABS;
+ VARIABLE_ARRAY(void *, ptrs, nregs);
+ size_t i, j, offset;
+
+ /* Fill matrix. */
+ for (i = offset = 0; i < NSLABS; i++) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
+ " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
+ SZ, arena_ind, i, j);
+ ptrs[(i * nregs_per_run) + j] = p;
+ }
+ }
+
+ /*
+ * Free all but one region of each run, but rotate which region is
+ * preserved, so that subsequent allocations exercise the within-run
+ * layout policy.
+ */
+ offset = 0;
+ for (i = offset = 0;
+ i < NSLABS;
+ i++, offset = (offset + 1) % nregs_per_run) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p = ptrs[(i * nregs_per_run) + j];
+ if (offset == j) {
+ continue;
+ }
+ dallocx(p, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ }
+ }
+
+ /*
+ * Logically refill matrix, skipping preserved regions and verifying
+ * that the matrix is unmodified.
+ */
+ offset = 0;
+ for (i = offset = 0;
+ i < NSLABS;
+ i++, offset = (offset + 1) % nregs_per_run) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p;
+
+ if (offset == j) {
+ continue;
+ }
+ p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
+ "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
+ i, j);
+ }
+ }
+
+ /* Clean up. */
+ arena_reset_mallctl(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_pack);
+}
diff --git a/deps/jemalloc/test/unit/pack.sh b/deps/jemalloc/test/unit/pack.sh
new file mode 100644
index 0000000..6f45148
--- /dev/null
+++ b/deps/jemalloc/test/unit/pack.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Immediately purge to minimize fragmentation.
+export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0"
diff --git a/deps/jemalloc/test/unit/pages.c b/deps/jemalloc/test/unit/pages.c
new file mode 100644
index 0000000..8dfd1a7
--- /dev/null
+++ b/deps/jemalloc/test/unit/pages.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_pages_huge) {
+ size_t alloc_size;
+ bool commit;
+ void *pages, *hugepage;
+
+ alloc_size = HUGEPAGE * 2 - PAGE;
+ commit = true;
+ pages = pages_map(NULL, alloc_size, PAGE, &commit);
+ expect_ptr_not_null(pages, "Unexpected pages_map() error");
+
+ if (init_system_thp_mode == thp_mode_default) {
+ hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
+ expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
+ "Unexpected pages_huge() result");
+ expect_false(pages_nohuge(hugepage, HUGEPAGE),
+ "Unexpected pages_nohuge() result");
+ }
+
+ pages_unmap(pages, alloc_size);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_pages_huge);
+}
diff --git a/deps/jemalloc/test/unit/peak.c b/deps/jemalloc/test/unit/peak.c
new file mode 100644
index 0000000..1112978
--- /dev/null
+++ b/deps/jemalloc/test/unit/peak.c
@@ -0,0 +1,47 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/peak.h"
+
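+/*
+ * peak_update() takes cumulative allocated/deallocated byte counts;
+ * peak_max() reports the largest net (allocated - deallocated) increase
+ * observed since the last peak_set_zero() (or since initialization).
+ */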
+TEST_BEGIN(test_peak) {
+ peak_t peak = PEAK_INITIALIZER;
+ expect_u64_eq(0, peak_max(&peak),
+ "Peak should be zero at initialization");
+ peak_update(&peak, 100, 50);
+ expect_u64_eq(50, peak_max(&peak),
+ "Missed update");
+ peak_update(&peak, 100, 100);
+ expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
+ peak_update(&peak, 100, 200);
+ expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
+ peak_update(&peak, 200, 200);
+ expect_u64_eq(50, peak_max(&peak), "Haven't reached peak again");
+ peak_update(&peak, 300, 200);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+ peak_set_zero(&peak, 300, 200);
+ expect_u64_eq(0, peak_max(&peak), "No effect from zeroing");
+ peak_update(&peak, 300, 300);
+ expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak");
+ peak_update(&peak, 400, 300);
+ expect_u64_eq(0, peak_max(&peak), "Should still be net negative");
+ peak_update(&peak, 500, 300);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+ /*
+ * Above, we set to zero while a net allocator; let's try as a
+ * net-deallocator.
+ */
+ peak_set_zero(&peak, 600, 700);
+ expect_u64_eq(0, peak_max(&peak), "No effect from zeroing.");
+ peak_update(&peak, 600, 800);
+ expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak.");
+ peak_update(&peak, 700, 800);
+ expect_u64_eq(0, peak_max(&peak), "Should still be net negative.");
+ peak_update(&peak, 800, 800);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_peak);
+}
diff --git a/deps/jemalloc/test/unit/ph.c b/deps/jemalloc/test/unit/ph.c
new file mode 100644
index 0000000..28f5e48
--- /dev/null
+++ b/deps/jemalloc/test/unit/ph.c
@@ -0,0 +1,330 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ph.h"
+
+typedef struct node_s node_t;
+ph_structs(heap, node_t);
+
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ heap_link_t link;
+ uint64_t key;
+};
+
+static int
+node_cmp(const node_t *a, const node_t *b) {
+ int ret;
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the heap, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return ret;
+}
+
+static int
+node_cmp_magic(const node_t *a, const node_t *b) {
+
+ expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ return node_cmp(a, b);
+}
+
+ph_gen(static, heap, node_t, link, node_cmp_magic);
+
+static node_t *
+node_next_get(const node_t *node) {
+ return phn_next_get((node_t *)node, offsetof(node_t, link));
+}
+
+static node_t *
+node_prev_get(const node_t *node) {
+ return phn_prev_get((node_t *)node, offsetof(node_t, link));
+}
+
+static node_t *
+node_lchild_get(const node_t *node) {
+ return phn_lchild_get((node_t *)node, offsetof(node_t, link));
+}
+
+static void
+node_print(const node_t *node, unsigned depth) {
+ unsigned i;
+ node_t *leftmost_child, *sibling;
+
+ for (i = 0; i < depth; i++) {
+ malloc_printf("\t");
+ }
+ malloc_printf("%2"FMTu64"\n", node->key);
+
+ leftmost_child = node_lchild_get(node);
+ if (leftmost_child == NULL) {
+ return;
+ }
+ node_print(leftmost_child, depth + 1);
+
+ for (sibling = node_next_get(leftmost_child); sibling !=
+ NULL; sibling = node_next_get(sibling)) {
+ node_print(sibling, depth + 1);
+ }
+}
+
+static void
+heap_print(const heap_t *heap) {
+ node_t *auxelm;
+
+ malloc_printf("vvv heap %p vvv\n", heap);
+ if (heap->ph.root == NULL) {
+ goto label_return;
+ }
+
+ node_print(heap->ph.root, 0);
+
+ for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
+ auxelm = node_next_get(auxelm)) {
+ expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ node_print(auxelm, 0);
+ }
+
+label_return:
+ malloc_printf("^^^ heap %p ^^^\n", heap);
+}
+
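+/*
+ * Recursively checks the heap-order invariant (every child compares >= its
+ * parent) and the prev/next sibling linkage, returning the number of nodes
+ * in the subtree rooted at node.
+ */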
+static unsigned
+node_validate(const node_t *node, const node_t *parent) {
+ unsigned nnodes = 1;
+ node_t *leftmost_child, *sibling;
+
+ if (parent != NULL) {
+ expect_d_ge(node_cmp_magic(node, parent), 0,
+ "Child is less than parent");
+ }
+
+ leftmost_child = node_lchild_get(node);
+ if (leftmost_child == NULL) {
+ return nnodes;
+ }
+ expect_ptr_eq(node_prev_get(leftmost_child),
+ (void *)node, "Leftmost child does not link to node");
+ nnodes += node_validate(leftmost_child, node);
+
+ for (sibling = node_next_get(leftmost_child); sibling !=
+ NULL; sibling = node_next_get(sibling)) {
+ expect_ptr_eq(node_next_get(node_prev_get(sibling)), sibling,
+ "sibling's prev doesn't link to sibling");
+ nnodes += node_validate(sibling, node);
+ }
+ return nnodes;
+}
+
+static unsigned
+heap_validate(const heap_t *heap) {
+ unsigned nnodes = 0;
+ node_t *auxelm;
+
+ if (heap->ph.root == NULL) {
+ goto label_return;
+ }
+
+ nnodes += node_validate(heap->ph.root, NULL);
+
+ for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
+ auxelm = node_next_get(auxelm)) {
+ expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ nnodes += node_validate(auxelm, NULL);
+ }
+
+label_return:
+ if (false) {
+ heap_print(heap);
+ }
+ return nnodes;
+}
+
+TEST_BEGIN(test_ph_empty) {
+ heap_t heap;
+
+ heap_new(&heap);
+ expect_true(heap_empty(&heap), "Heap should be empty");
+ expect_ptr_null(heap_first(&heap), "Unexpected node");
+ expect_ptr_null(heap_any(&heap), "Unexpected node");
+}
+TEST_END
+
+static void
+node_remove(heap_t *heap, node_t *node) {
+ heap_remove(heap, node);
+
+ node->magic = 0;
+}
+
+static node_t *
+node_remove_first(heap_t *heap) {
+ node_t *node = heap_remove_first(heap);
+ node->magic = 0;
+ return node;
+}
+
+static node_t *
+node_remove_any(heap_t *heap) {
+ node_t *node = heap_remove_any(heap);
+ node->magic = 0;
+ return node;
+}
+
+TEST_BEGIN(test_ph_random) {
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ heap_t heap;
+ node_t nodes[NNODES];
+ unsigned i, j, k;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = j;
+ }
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = NNODES - j - 1;
+ }
+ break;
+ default:
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize heap and nodes. */
+ heap_new(&heap);
+ expect_u_eq(heap_validate(&heap), 0,
+ "Incorrect node count");
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ heap_insert(&heap, &nodes[k]);
+ if (i % 13 == 12) {
+ expect_ptr_not_null(heap_any(&heap),
+ "Heap should not be empty");
+ /* Trigger merging. */
+ expect_ptr_not_null(heap_first(&heap),
+ "Heap should not be empty");
+ }
+ expect_u_eq(heap_validate(&heap), k + 1,
+ "Incorrect node count");
+ }
+
+ expect_false(heap_empty(&heap),
+ "Heap should not be empty");
+
+ /* Remove nodes. */
+ switch (i % 6) {
+ case 0:
+ for (k = 0; k < j; k++) {
+ expect_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ node_remove(&heap, &nodes[k]);
+ expect_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ case 1:
+ for (k = j; k > 0; k--) {
+ node_remove(&heap, &nodes[k-1]);
+ expect_u_eq(heap_validate(&heap), k - 1,
+ "Incorrect node count");
+ }
+ break;
+ case 2: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = node_remove_first(&heap);
+ expect_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ if (prev != NULL) {
+ expect_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ prev = node;
+ }
+ break;
+ } case 3: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = heap_first(&heap);
+ expect_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ if (prev != NULL) {
+ expect_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ node_remove(&heap, node);
+ expect_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ prev = node;
+ }
+ break;
+ } case 4: {
+ for (k = 0; k < j; k++) {
+ node_remove_any(&heap);
+ expect_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ } case 5: {
+ for (k = 0; k < j; k++) {
+ node_t *node = heap_any(&heap);
+ expect_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ node_remove(&heap, node);
+ expect_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ } default:
+ not_reached();
+ }
+
+ expect_ptr_null(heap_first(&heap),
+ "Heap should be empty");
+ expect_ptr_null(heap_any(&heap),
+ "Heap should be empty");
+ expect_true(heap_empty(&heap), "Heap should be empty");
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef NBAGS
+#undef SEED
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ph_empty,
+ test_ph_random);
+}
diff --git a/deps/jemalloc/test/unit/prng.c b/deps/jemalloc/test/unit/prng.c
new file mode 100644
index 0000000..a6d9b01
--- /dev/null
+++ b/deps/jemalloc/test/unit/prng.c
@@ -0,0 +1,189 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_prng_lg_range_u32) {
+ uint32_t sa, sb;
+ uint32_t ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
+ sa = 42;
+ rb = prng_lg_range_u32(&sa, 32);
+ expect_u32_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, 32);
+ expect_u32_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
+ rb = prng_lg_range_u32(&sa, 32);
+ expect_u32_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
+ for (lg_range = 31; lg_range > 0; lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, lg_range);
+ expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ expect_u32_eq(rb, (ra >> (32 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_u64) {
+ uint64_t sa, sb, ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ sa = 42;
+ rb = prng_lg_range_u64(&sa, 64);
+ expect_u64_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, 64);
+ expect_u64_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ rb = prng_lg_range_u64(&sa, 64);
+ expect_u64_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ for (lg_range = 63; lg_range > 0; lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, lg_range);
+ expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ expect_u64_eq(rb, (ra >> (64 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_zu) {
+ size_t sa, sb;
+ size_t ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ sa = 42;
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
+ lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, lg_range);
+ expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
+ lg_range)), "Expected high order bits of full-width "
+ "result, lg_range=%u", lg_range);
+ }
+
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u32) {
+ uint32_t range;
+
+ const uint32_t max_range = 10000000;
+ const uint32_t range_step = 97;
+ const unsigned nreps = 10;
+
+ for (range = 2; range < max_range; range += range_step) {
+ uint32_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < nreps; rep++) {
+ uint32_t r = prng_range_u32(&s, range);
+
+ expect_u32_lt(r, range, "Out of range");
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u64) {
+ uint64_t range;
+
+ const uint64_t max_range = 10000000;
+ const uint64_t range_step = 97;
+ const unsigned nreps = 10;
+
+ for (range = 2; range < max_range; range += range_step) {
+ uint64_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < nreps; rep++) {
+ uint64_t r = prng_range_u64(&s, range);
+
+ expect_u64_lt(r, range, "Out of range");
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_zu) {
+ size_t range;
+
+ const size_t max_range = 10000000;
+ const size_t range_step = 97;
+ const unsigned nreps = 10;
+
+
+ for (range = 2; range < max_range; range += range_step) {
+ size_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < nreps; rep++) {
+ size_t r = prng_range_zu(&s, range);
+
+ expect_zu_lt(r, range, "Out of range");
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_prng_lg_range_u32,
+ test_prng_lg_range_u64,
+ test_prng_lg_range_zu,
+ test_prng_range_u32,
+ test_prng_range_u64,
+ test_prng_range_zu);
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
new file mode 100644
index 0000000..ef392ac
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -0,0 +1,84 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
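+/*
+ * btalloc() allocates through a call chain permuted by its second argument,
+ * so each (thread, iteration) pair should produce a distinct backtrace and
+ * therefore grow the profile's backtrace count.
+ */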
+static void *
+alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
+ return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
+}
+
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ size_t bt_count_prev, bt_count;
+ unsigned i_prev, i;
+
+ i_prev = 0;
+ bt_count_prev = 0;
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ void *p = alloc_from_permuted_backtrace(thd_ind, i);
+ dallocx(p, 0);
+ if (i % DUMP_INTERVAL == 0) {
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
+ i+1 == NALLOCS_PER_THREAD) {
+ bt_count = prof_bt_count();
+ expect_zu_le(bt_count_prev+(i-i_prev), bt_count,
+ "Expected larger backtrace count increase");
+ i_prev = i;
+ bt_count_prev = bt_count;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_idump) {
+ bool active;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_idump);
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.sh b/deps/jemalloc/test/unit/prof_accum.sh
new file mode 100644
index 0000000..b3e13fc
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c
new file mode 100644
index 0000000..af29e7a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_active.c
@@ -0,0 +1,119 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_data.h"
+
+static void
+mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
+ expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
+ name);
+}
+
+static void
+mallctl_bool_set(const char *name, bool old_expected, bool val_new,
+ const char *func, int line) {
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
+ sizeof(val_new)), 0,
+ "%s():%d: Unexpected mallctl failure reading/writing %s", func,
+ line, name);
+ expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
+ line, name);
+}
+
+static void
+mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
+ int line) {
+ mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
+}
+#define mallctl_prof_active_get(a) \
+ mallctl_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_prof_active_set_impl(bool prof_active_old_expected,
+ bool prof_active_new, const char *func, int line) {
+ mallctl_bool_set("prof.active", prof_active_old_expected,
+ prof_active_new, func, line);
+}
+#define mallctl_prof_active_set(a, b) \
+ mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
+ const char *func, int line) {
+ mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
+ func, line);
+}
+#define mallctl_thread_prof_active_get(a) \
+ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
+ bool thread_prof_active_new, const char *func, int line) {
+ mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
+ thread_prof_active_new, func, line);
+}
+#define mallctl_thread_prof_active_set(a, b) \
+ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
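+/*
+ * With lg_prof_sample:0 (set in prof_active.sh) every allocation is sampled,
+ * so a single mallocx() should record exactly one backtrace whenever both
+ * prof.active and thread.prof.active are effectively on, and none otherwise.
+ */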
+prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
+ void *p;
+ size_t expected_backtraces = expect_sample ? 1 : 0;
+
+ expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
+ line);
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_zu_eq(prof_bt_count(), expected_backtraces,
+ "%s():%d: Unexpected backtrace count", func, line);
+ dallocx(p, 0);
+}
+#define prof_sampling_probe(a) \
+ prof_sampling_probe_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_active) {
+ test_skip_if(!config_prof);
+
+ mallctl_prof_active_get(true);
+ mallctl_thread_prof_active_get(false);
+
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(false, false);
+ /* prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(true, false);
+ mallctl_thread_prof_active_set(false, false);
+ /* !prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, false);
+ mallctl_thread_prof_active_set(false, true);
+ /* !prof.active, thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, true);
+ mallctl_thread_prof_active_set(true, true);
+ /* prof.active, thread.prof.active. */
+ prof_sampling_probe(true);
+
+ /* Restore settings. */
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(true, false);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_prof_active);
+}
diff --git a/deps/jemalloc/test/unit/prof_active.sh b/deps/jemalloc/test/unit/prof_active.sh
new file mode 100644
index 0000000..9749674
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_active.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,prof_thread_active_init:false,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
new file mode 100644
index 0000000..46e4503
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -0,0 +1,77 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
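+/*
+ * With prof_gdump enabled, a dump is triggered whenever total virtual memory
+ * exceeds its previous maximum, so each large allocation below should open
+ * the (intercepted) dump file unless gdump has been turned off via the
+ * prof.gdump mallctl.
+ */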
+TEST_BEGIN(test_gdump) {
+ test_skip_if(opt_hpa);
+ bool active, gdump, gdump_old;
+ void *p, *q, *r, *s;
+ size_t sz;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+
+ did_prof_dump_open = false;
+ q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+
+ gdump = false;
+ sz = sizeof(gdump_old);
+ expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ (void *)&gdump, sizeof(gdump)), 0,
+ "Unexpected mallctl failure while disabling prof.gdump");
+ assert(gdump_old);
+ did_prof_dump_open = false;
+ r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_false(did_prof_dump_open, "Unexpected profile dump");
+
+ gdump = true;
+ sz = sizeof(gdump_old);
+ expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ (void *)&gdump, sizeof(gdump)), 0,
+ "Unexpected mallctl failure while enabling prof.gdump");
+ assert(!gdump_old);
+ did_prof_dump_open = false;
+ s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+	expect_ptr_not_null(s, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+ dallocx(q, 0);
+ dallocx(r, 0);
+ dallocx(s, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_gdump);
+}
diff --git a/deps/jemalloc/test/unit/prof_gdump.sh b/deps/jemalloc/test/unit/prof_gdump.sh
new file mode 100644
index 0000000..3f600d2
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_gdump.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_hook.c b/deps/jemalloc/test/unit/prof_hook.c
new file mode 100644
index 0000000..6480d93
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_hook.c
@@ -0,0 +1,169 @@
+#include "test/jemalloc_test.h"
+
+const char *dump_filename = "/dev/null";
+
+prof_backtrace_hook_t default_hook;
+
+bool mock_bt_hook_called = false;
+bool mock_dump_hook_called = false;
+
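+/*
+ * Replacement backtrace hook: fill the vector with synthetic frame addresses
+ * and record that the hook was invoked.
+ */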
+void
+mock_bt_hook(void **vec, unsigned *len, unsigned max_len) {
+ *len = max_len;
+ for (unsigned i = 0; i < max_len; ++i) {
+ vec[i] = (void *)((uintptr_t)i);
+ }
+ mock_bt_hook_called = true;
+}
+
+void
+mock_bt_augmenting_hook(void **vec, unsigned *len, unsigned max_len) {
+ default_hook(vec, len, max_len);
+ expect_u_gt(*len, 0, "Default backtrace hook returned empty backtrace");
+ expect_u_lt(*len, max_len,
+	    "Default backtrace hook returned too large a backtrace");
+
+	/* Add a separator between default and augmented frames. */
+ vec[*len] = (void *)0x030303030;
+ (*len)++;
+
+ /* Add more stack frames */
+ for (unsigned i = 0; i < 3; ++i) {
+ if (*len == max_len) {
+ break;
+ }
+ vec[*len] = (void *)((uintptr_t)i);
+ (*len)++;
+ }
+
+ mock_bt_hook_called = true;
+}
+
+void
+mock_dump_hook(const char *filename) {
+ mock_dump_hook_called = true;
+ expect_str_eq(filename, dump_filename,
+ "Incorrect file name passed to the dump hook");
+}
+
+TEST_BEGIN(test_prof_backtrace_hook_replace) {
+
+ test_skip_if(!config_prof);
+
+ mock_bt_hook_called = false;
+
+ void *p0 = mallocx(1, 0);
+ assert_ptr_not_null(p0, "Failed to allocate");
+
+ expect_false(mock_bt_hook_called, "Called mock hook before it's set");
+
+ prof_backtrace_hook_t null_hook = NULL;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ NULL, 0, (void *)&null_hook, sizeof(null_hook)),
+ EINVAL, "Incorrectly allowed NULL backtrace hook");
+
+ size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
+ prof_backtrace_hook_t hook = &mock_bt_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ void *p1 = mallocx(1, 0);
+ assert_ptr_not_null(p1, "Failed to allocate");
+
+ expect_true(mock_bt_hook_called, "Didn't call mock hook");
+
+ prof_backtrace_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+
+ dallocx(p1, 0);
+ dallocx(p0, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_prof_backtrace_hook_augment) {
+
+ test_skip_if(!config_prof);
+
+ mock_bt_hook_called = false;
+
+ void *p0 = mallocx(1, 0);
+ assert_ptr_not_null(p0, "Failed to allocate");
+
+ expect_false(mock_bt_hook_called, "Called mock hook before it's set");
+
+ size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
+ prof_backtrace_hook_t hook = &mock_bt_augmenting_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ void *p1 = mallocx(1, 0);
+ assert_ptr_not_null(p1, "Failed to allocate");
+
+ expect_true(mock_bt_hook_called, "Didn't call mock hook");
+
+ prof_backtrace_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+
+ dallocx(p1, 0);
+ dallocx(p0, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_prof_dump_hook) {
+
+ test_skip_if(!config_prof);
+
+ mock_dump_hook_called = false;
+
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
+ sizeof(dump_filename)), 0, "Failed to dump heap profile");
+
+ expect_false(mock_dump_hook_called, "Called dump hook before it's set");
+
+ size_t default_hook_sz = sizeof(prof_dump_hook_t);
+ prof_dump_hook_t hook = &mock_dump_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_dump",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
+ sizeof(dump_filename)), 0, "Failed to dump heap profile");
+
+ expect_true(mock_dump_hook_called, "Didn't call mock hook");
+
+ prof_dump_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_dump_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_dump",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_backtrace_hook_replace,
+ test_prof_backtrace_hook_augment,
+ test_prof_dump_hook);
+}
diff --git a/deps/jemalloc/test/unit/prof_hook.sh b/deps/jemalloc/test/unit/prof_hook.sh
new file mode 100644
index 0000000..c7ebd8f
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_hook.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c
new file mode 100644
index 0000000..455ac52
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_idump.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+#define TEST_PREFIX "test_prefix"
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+ const char filename_prefix[] = TEST_PREFIX ".";
+ expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
+ - 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\"");
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+TEST_BEGIN(test_idump) {
+ bool active;
+ void *p;
+
+ const char *test_prefix = TEST_PREFIX;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+
+ expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
+ sizeof(test_prefix)), 0,
+ "Unexpected mallctl failure while overwriting dump prefix");
+
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+
+ did_prof_dump_open = false;
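+	/*
+	 * With lg_prof_sample:0 and lg_prof_interval:0 (see prof_idump.sh),
+	 * even a single small allocation is sampled and should trigger an
+	 * interval dump.
+	 */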
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_idump);
+}
diff --git a/deps/jemalloc/test/unit/prof_idump.sh b/deps/jemalloc/test/unit/prof_idump.sh
new file mode 100644
index 0000000..4dc599a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_idump.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache:false"
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
+fi
+
+
diff --git a/deps/jemalloc/test/unit/prof_log.c b/deps/jemalloc/test/unit/prof_log.c
new file mode 100644
index 0000000..5ff208e
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_log.c
@@ -0,0 +1,151 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_log.h"
+
+#define N_PARAM 100
+#define N_THREADS 10
+
+static void expect_rep() {
+ expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
+}
+
+static void expect_log_empty() {
+ expect_zu_eq(prof_log_bt_count(), 0,
+ "The log has backtraces; it isn't empty");
+ expect_zu_eq(prof_log_thr_count(), 0,
+ "The log has threads; it isn't empty");
+ expect_zu_eq(prof_log_alloc_count(), 0,
+ "The log has allocations; it isn't empty");
+}
+
+void *buf[N_PARAM];
+
+static void f() {
+ int i;
+ for (i = 0; i < N_PARAM; i++) {
+ buf[i] = malloc(100);
+ }
+ for (i = 0; i < N_PARAM; i++) {
+ free(buf[i]);
+ }
+}
+
+TEST_BEGIN(test_prof_log_many_logs) {
+ int i;
+
+ test_skip_if(!config_prof);
+
+ for (i = 0; i < N_PARAM; i++) {
+ expect_b_eq(prof_log_is_logging(), false,
+ "Logging shouldn't have started yet");
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ expect_b_eq(prof_log_is_logging(), true,
+ "Logging should be started by now");
+ expect_log_empty();
+ expect_rep();
+ f();
+ expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
+ expect_rep();
+ expect_b_eq(prof_log_is_logging(), true,
+ "Logging should still be on");
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+ expect_b_eq(prof_log_is_logging(), false,
+ "Logging should have turned off");
+ }
+}
+TEST_END
+
+thd_t thr_buf[N_THREADS];
+
+static void *f_thread(void *unused) {
+ int i;
+ for (i = 0; i < N_PARAM; i++) {
+ void *p = malloc(100);
+ memset(p, 100, 1);
+ free(p);
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_log_many_threads) {
+
+ test_skip_if(!config_prof);
+
+ int i;
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ for (i = 0; i < N_THREADS; i++) {
+ thd_create(&thr_buf[i], &f_thread, NULL);
+ }
+
+ for (i = 0; i < N_THREADS; i++) {
+ thd_join(thr_buf[i], NULL);
+ }
+ expect_zu_eq(prof_log_thr_count(), N_THREADS,
+ "Wrong number of thread entries");
+ expect_rep();
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+}
+TEST_END
+
+static void f3() {
+ void *p = malloc(100);
+ free(p);
+}
+
+static void f1() {
+ void *p = malloc(100);
+ f3();
+ free(p);
+}
+
+static void f2() {
+ void *p = malloc(100);
+ free(p);
+}
+
+TEST_BEGIN(test_prof_log_many_traces) {
+
+ test_skip_if(!config_prof);
+
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ int i;
+ expect_rep();
+ expect_log_empty();
+ for (i = 0; i < N_PARAM; i++) {
+ expect_rep();
+ f1();
+ expect_rep();
+ f2();
+ expect_rep();
+ f3();
+ expect_rep();
+ }
+ /*
+ * There should be 8 total backtraces: two for malloc/free in f1(), two
+ * for malloc/free in f2(), two for malloc/free in f3(), and then two
+	 * for malloc/free in f1()'s call to f3(). However, compiler
+ * optimizations such as loop unrolling might generate more call sites.
+ * So >= 8 traces are expected.
+ */
+ expect_zu_ge(prof_log_bt_count(), 8,
+ "Expect at least 8 backtraces given sample workload");
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+}
+TEST_END
+
+int
+main(void) {
+ if (config_prof) {
+ prof_log_dummy_set(true);
+ }
+ return test_no_reentrancy(
+ test_prof_log_many_logs,
+ test_prof_log_many_traces,
+ test_prof_log_many_threads);
+}
diff --git a/deps/jemalloc/test/unit/prof_log.sh b/deps/jemalloc/test/unit/prof_log.sh
new file mode 100644
index 0000000..485f9bf
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_mdump.c b/deps/jemalloc/test/unit/prof_mdump.c
new file mode 100644
index 0000000..75b3a51
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_mdump.c
@@ -0,0 +1,216 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+static const char *test_filename = "test_filename";
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+	/*
+	 * Stronger than a strcmp(): verify that we internally use the
+	 * caller-supplied char pointer directly.
+	 */
+ expect_ptr_eq(filename, test_filename,
+ "Dump file name should be \"%s\"", test_filename);
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+TEST_BEGIN(test_mdump_normal) {
+ test_skip_if(!config_prof);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ did_prof_dump_open = false;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), 0,
+ "Unexpected mallctl failure while dumping");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+}
+TEST_END
+
+static int
+prof_dump_open_file_error(const char *filename, int mode) {
+ return -1;
+}
+
+/*
+ * In the context of test_mdump_output_error, prof_dump_write_file_count is the
+ * total number of times prof_dump_write_file_error() is expected to be called.
+ * In the context of test_mdump_maps_error, prof_dump_write_file_count is the
+ * total number of times prof_dump_write_file_error() is expected to be called
+ * starting from the one that contains an 'M' (beginning the "MAPPED_LIBRARIES"
+ * header).
+ */
+static int prof_dump_write_file_count;
+
+static ssize_t
+prof_dump_write_file_error(int fd, const void *s, size_t len) {
+ --prof_dump_write_file_count;
+
+ expect_d_ge(prof_dump_write_file_count, 0,
+ "Write is called after error occurs");
+
+ if (prof_dump_write_file_count == 0) {
+ return -1;
+ } else {
+ /*
+ * Any non-negative number indicates success, and for
+ * simplicity we just use 0. When prof_dump_write_file_count
+ * is positive, it means that we haven't reached the write that
+ * we want to fail; when prof_dump_write_file_count is
+ * negative, it means that we've already violated the
+ * expect_d_ge(prof_dump_write_file_count, 0) statement above,
+ * but instead of aborting, we continue the rest of the test,
+ * and we indicate that all the writes after the failed write
+ * are successful.
+ */
+ return 0;
+ }
+}
+
+static void
+expect_write_failure(int count) {
+ prof_dump_write_file_count = count;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), EFAULT, "Dump should err");
+ expect_d_eq(prof_dump_write_file_count, 0,
+ "Dumping stopped after a wrong number of writes");
+}
+
+TEST_BEGIN(test_mdump_output_error) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_debug);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+ prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
+
+ prof_dump_write_file = prof_dump_write_file_error;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /*
+ * When opening the dump file fails, there shouldn't be any write, and
+ * mallctl() should return failure.
+ */
+ prof_dump_open_file = prof_dump_open_file_error;
+ expect_write_failure(0);
+
+ /*
+ * When the n-th write fails, there shouldn't be any more write, and
+ * mallctl() should return failure.
+ */
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ expect_write_failure(1); /* First write fails. */
+ expect_write_failure(2); /* Second write fails. */
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+ prof_dump_write_file = write_file_orig;
+}
+TEST_END
+
+static int
+prof_dump_open_maps_error() {
+ return -1;
+}
+
+static bool started_piping_maps_file;
+
+static ssize_t
+prof_dump_write_maps_file_error(int fd, const void *s, size_t len) {
+ /* The main dump doesn't contain any capital 'M'. */
+ if (!started_piping_maps_file && strchr(s, 'M') != NULL) {
+ started_piping_maps_file = true;
+ }
+
+ if (started_piping_maps_file) {
+ return prof_dump_write_file_error(fd, s, len);
+ } else {
+ /* Return success when we haven't started piping maps. */
+ return 0;
+ }
+}
+
+static void
+expect_maps_write_failure(int count) {
+ int mfd = prof_dump_open_maps();
+ if (mfd == -1) {
+ /* No need to continue if we just can't find the maps file. */
+ return;
+ }
+ close(mfd);
+ started_piping_maps_file = false;
+ expect_write_failure(count);
+ expect_true(started_piping_maps_file, "Should start piping maps");
+}
+
+TEST_BEGIN(test_mdump_maps_error) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_debug);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+ prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
+ prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps;
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ prof_dump_write_file = prof_dump_write_maps_file_error;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /*
+ * When opening the maps file fails, there shouldn't be any maps write,
+ * and mallctl() should return success.
+ */
+ prof_dump_open_maps = prof_dump_open_maps_error;
+ started_piping_maps_file = false;
+ prof_dump_write_file_count = 0;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), 0,
+ "mallctl should not fail in case of maps file opening failure");
+ expect_false(started_piping_maps_file, "Shouldn't start piping maps");
+ expect_d_eq(prof_dump_write_file_count, 0,
+ "Dumping stopped after a wrong number of writes");
+
+ /*
+ * When the n-th maps write fails (given that we are able to find the
+ * maps file), there shouldn't be any more maps write, and mallctl()
+ * should return failure.
+ */
+ prof_dump_open_maps = open_maps_orig;
+ expect_maps_write_failure(1); /* First write fails. */
+ expect_maps_write_failure(2); /* Second write fails. */
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+ prof_dump_write_file = write_file_orig;
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mdump_normal,
+ test_mdump_output_error,
+ test_mdump_maps_error);
+}
diff --git a/deps/jemalloc/test/unit/prof_mdump.sh b/deps/jemalloc/test/unit/prof_mdump.sh
new file mode 100644
index 0000000..d14cb8c
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_mdump.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_recent.c b/deps/jemalloc/test/unit/prof_recent.c
new file mode 100644
index 0000000..4fb3723
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_recent.c
@@ -0,0 +1,678 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_recent.h"
+
+/* As specified in the shell script */
+#define OPT_ALLOC_MAX 3
+
+/* Invariant before and after every test (when config_prof is on) */
+static void
+confirm_prof_setup() {
+ /* Options */
+ assert_true(opt_prof, "opt_prof not on");
+ assert_true(opt_prof_active, "opt_prof_active not on");
+ assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
+ "opt_prof_recent_alloc_max not set correctly");
+
+ /* Dynamics */
+ assert_true(prof_active_state, "prof_active not on");
+ assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
+ "prof_recent_alloc_max not set correctly");
+}
+
+TEST_BEGIN(test_confirm_setup) {
+ test_skip_if(!config_prof);
+ confirm_prof_setup();
+}
+TEST_END
+
+TEST_BEGIN(test_prof_recent_off) {
+ test_skip_if(config_prof);
+
+ const ssize_t past_ref = 0, future_ref = 0;
+ const size_t len_ref = sizeof(ssize_t);
+
+ ssize_t past = past_ref, future = future_ref;
+ size_t len = len_ref;
+
+#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do { \
+ assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c, \
+ d), ENOENT, "Should return ENOENT when config_prof is off");\
+ assert_zd_eq(past, past_ref, "output was touched"); \
+ assert_zu_eq(len, len_ref, "output length was touched"); \
+ assert_zd_eq(future, future_ref, "input was touched"); \
+} while (0)
+
+ ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
+ ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
+ ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
+ ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);
+
+#undef ASSERT_SHOULD_FAIL
+}
+TEST_END
+
+TEST_BEGIN(test_prof_recent_on) {
+ test_skip_if(!config_prof);
+
+ ssize_t past, future;
+ size_t len = sizeof(ssize_t);
+
+ confirm_prof_setup();
+
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
+ confirm_prof_setup();
+
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, NULL, 0), 0, "Read error");
+ expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
+ future = OPT_ALLOC_MAX + 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, len), 0, "Write error");
+ future = -1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), 0, "Read/write error");
+ expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
+ future = -2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), EINVAL,
+ "Invalid write should return EINVAL");
+ expect_zd_eq(past, OPT_ALLOC_MAX + 1,
+ "Output should not be touched given invalid write");
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), 0, "Read/write error");
+ expect_zd_eq(past, -1, "Wrong read result");
+ future = OPT_ALLOC_MAX + 2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len * 2), EINVAL,
+ "Invalid write should return EINVAL");
+ expect_zd_eq(past, -1,
+ "Output should not be touched given invalid write");
+
+ confirm_prof_setup();
+}
+TEST_END
+
+/* Reproducible sequence of request sizes */
+#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
+
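+/* Sampled allocations must carry a recent-alloc record on their edata. */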
+static void
+confirm_malloc(void *p) {
+ assert_ptr_not_null(p, "malloc failed unexpectedly");
+ edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
+ assert_ptr_not_null(e, "NULL edata for living pointer");
+ prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
+ assert_ptr_not_null(n, "Record in edata should not be NULL");
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
+ "edata pointer in record is not correct");
+ expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
+}
+
+static void
+confirm_record_size(prof_recent_t *n, unsigned kth) {
+ expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
+ "Recorded allocation size is wrong");
+}
+
+static void
+confirm_record_living(prof_recent_t *n) {
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
+ assert_ptr_not_null(edata,
+ "Recorded edata should not be NULL for living pointer");
+ expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
+ "Record in edata is not correct");
+ expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
+}
+
+static void
+confirm_record_released(prof_recent_t *n) {
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
+ "Recorded edata should be NULL for released pointer");
+ expect_ptr_not_null(n->dalloc_tctx,
+ "dalloc_tctx in record should not be NULL for released pointer");
+}
+
+TEST_BEGIN(test_prof_recent_alloc) {
+ test_skip_if(!config_prof);
+
+ bool b;
+ unsigned i, c;
+ size_t req_size;
+ void *p;
+ prof_recent_t *n;
+ ssize_t future;
+
+ confirm_prof_setup();
+
+ /*
+ * First batch of 2 * OPT_ALLOC_MAX allocations. After the
+ * (OPT_ALLOC_MAX - 1)'th allocation the recorded allocations should
+ * always be the last OPT_ALLOC_MAX allocations coming from here.
+ */
+ for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ confirm_malloc(p);
+ if (i < OPT_ALLOC_MAX - 1) {
+ assert_false(ql_empty(&prof_recent_alloc_list),
+ "Empty recent allocation");
+ free(p);
+ /*
+ * The recorded allocations may still include some
+ * other allocations before the test run started,
+ * so keep allocating without checking anything.
+ */
+ continue;
+ }
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, i + c - OPT_ALLOC_MAX);
+ if (c == OPT_ALLOC_MAX) {
+ confirm_record_living(n);
+ } else {
+ confirm_record_released(n);
+ }
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ confirm_prof_setup();
+
+ b = false;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
+ "mallctl for turning off prof_active failed");
+
+ /*
+ * Second batch of OPT_ALLOC_MAX allocations. Since prof_active is
+ * turned off, this batch shouldn't be recorded.
+ */
+ for (; i < 3 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ assert_ptr_not_null(p, "malloc failed unexpectedly");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ b = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
+ "mallctl for turning on prof_active failed");
+
+ confirm_prof_setup();
+
+ /*
+ * Third batch of OPT_ALLOC_MAX allocations. Since prof_active is
+ * turned back on, they should be recorded, and in the list of recorded
+ * allocations they should follow the first batch rather than the
+ * second batch.
+ */
+ for (; i < 4 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ confirm_malloc(p);
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n,
+ /* Is the allocation from the third batch? */
+ i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
+ /* If yes, then it's just recorded. */
+ i + c - OPT_ALLOC_MAX :
+ /*
+ * Otherwise, it should come from the first batch
+ * instead of the second batch.
+ */
+ i + c - 2 * OPT_ALLOC_MAX);
+ if (c == OPT_ALLOC_MAX) {
+ confirm_record_living(n);
+ } else {
+ confirm_record_released(n);
+ }
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ /* Increasing the limit shouldn't alter the list of records. */
+ future = OPT_ALLOC_MAX + 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+
+ /*
+ * Decreasing the limit shouldn't alter the list of records as long as
+ * the new limit is still no less than the length of the list.
+ */
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+
+ /*
+ * Decreasing the limit should shorten the list of records if the new
+ * limit is less than the length of the list.
+ */
+ future = OPT_ALLOC_MAX - 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX - 1,
+ "Incorrect total number of allocations");
+
+ /* Setting to unlimited shouldn't alter the list of records. */
+ future = -1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX - 1,
+ "Incorrect total number of allocations");
+
+ /* Downshift to only one record. */
+ future = 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
+ n = ql_first(&prof_recent_alloc_list);
+ confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
+ confirm_record_released(n);
+ n = ql_next(&prof_recent_alloc_list, n, link);
+ assert_ptr_null(n, "Recent list should only contain one record");
+
+ /* Completely turn off. */
+ future = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_true(ql_empty(&prof_recent_alloc_list),
+ "Recent list should be empty");
+
+ /* Restore the settings. */
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_true(ql_empty(&prof_recent_alloc_list),
+ "Recent list should be empty");
+
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef NTH_REQ_SIZE
+
+#define DUMP_OUT_SIZE 4096
+static char dump_out[DUMP_OUT_SIZE];
+static size_t dump_out_len = 0;
+
+static void
+test_dump_write_cb(void *not_used, const char *str) {
+ size_t len = strlen(str);
+ assert(dump_out_len + len < DUMP_OUT_SIZE);
+ memcpy(dump_out + dump_out_len, str, len + 1);
+ dump_out_len += len;
+}
+
+static void
+call_dump() {
+ static void *in[2] = {test_dump_write_cb, NULL};
+ dump_out_len = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
+ NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
+}
+
+typedef struct {
+ size_t size;
+ size_t usize;
+ bool released;
+} confirm_record_t;
+
+#define DUMP_ERROR "Dump output is wrong"
+
+static void
+confirm_record(const char *template, const confirm_record_t *records,
+ const size_t n_records) {
+ static const char *types[2] = {"alloc", "dalloc"};
+ static char buf[64];
+
+ /*
+ * The template string would be in the form of:
+ * "{...,\"recent_alloc\":[]}",
+ * and dump_out would be in the form of:
+ * "{...,\"recent_alloc\":[...]}".
+ * Using "- 2" serves to cut right before the ending "]}".
+ */
+ assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
+ DUMP_ERROR);
+ assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
+ template + strlen(template) - 2, 2), 0, DUMP_ERROR);
+
+ const char *start = dump_out + strlen(template) - 2;
+ const char *end = dump_out + strlen(dump_out) - 2;
+ const confirm_record_t *record;
+ for (record = records; record < records + n_records; ++record) {
+
+#define ASSERT_CHAR(c) do { \
+ assert_true(start < end, DUMP_ERROR); \
+ assert_c_eq(*start++, c, DUMP_ERROR); \
+} while (0)
+
+#define ASSERT_STR(s) do { \
+ const size_t len = strlen(s); \
+ assert_true(start + len <= end, DUMP_ERROR); \
+ assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \
+ start += len; \
+} while (0)
+
+#define ASSERT_FORMATTED_STR(s, ...) do { \
+ malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__); \
+ ASSERT_STR(buf); \
+} while (0)
+
+ if (record != records) {
+ ASSERT_CHAR(',');
+ }
+
+ ASSERT_CHAR('{');
+
+ ASSERT_STR("\"size\"");
+ ASSERT_CHAR(':');
+ ASSERT_FORMATTED_STR("%zu", record->size);
+ ASSERT_CHAR(',');
+
+ ASSERT_STR("\"usize\"");
+ ASSERT_CHAR(':');
+ ASSERT_FORMATTED_STR("%zu", record->usize);
+ ASSERT_CHAR(',');
+
+ ASSERT_STR("\"released\"");
+ ASSERT_CHAR(':');
+ ASSERT_STR(record->released ? "true" : "false");
+ ASSERT_CHAR(',');
+
+ const char **type = types;
+ while (true) {
+ ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
+ ASSERT_CHAR(':');
+ while (isdigit(*start)) {
+ ++start;
+ }
+ ASSERT_CHAR(',');
+
+ if (opt_prof_sys_thread_name) {
+ ASSERT_FORMATTED_STR("\"%s_thread_name\"",
+ *type);
+ ASSERT_CHAR(':');
+ ASSERT_CHAR('"');
+ while (*start != '"') {
+ ++start;
+ }
+ ASSERT_CHAR('"');
+ ASSERT_CHAR(',');
+ }
+
+ ASSERT_FORMATTED_STR("\"%s_time\"", *type);
+ ASSERT_CHAR(':');
+ while (isdigit(*start)) {
+ ++start;
+ }
+ ASSERT_CHAR(',');
+
+ ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
+ ASSERT_CHAR(':');
+ ASSERT_CHAR('[');
+ while (isdigit(*start) || *start == 'x' ||
+ (*start >= 'a' && *start <= 'f') ||
+ *start == '\"' || *start == ',') {
+ ++start;
+ }
+ ASSERT_CHAR(']');
+
+ if (strcmp(*type, "dalloc") == 0) {
+ break;
+ }
+
+ assert(strcmp(*type, "alloc") == 0);
+ if (!record->released) {
+ break;
+ }
+
+ ASSERT_CHAR(',');
+ ++type;
+ }
+
+ ASSERT_CHAR('}');
+
+#undef ASSERT_FORMATTED_STR
+#undef ASSERT_STR
+#undef ASSERT_CHAR
+
+ }
+ assert_ptr_eq(record, records + n_records, DUMP_ERROR);
+ assert_ptr_eq(start, end, DUMP_ERROR);
+}
+
+TEST_BEGIN(test_prof_recent_alloc_dump) {
+ test_skip_if(!config_prof);
+
+ confirm_prof_setup();
+
+ ssize_t future;
+ void *p, *q;
+ confirm_record_t records[2];
+
+ assert_zu_eq(lg_prof_sample, (size_t)0,
+ "lg_prof_sample not set correctly");
+
+ future = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ call_dump();
+ expect_str_eq(dump_out, "{\"sample_interval\":1,"
+ "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);
+
+ future = 2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ call_dump();
+ const char *template = "{\"sample_interval\":1,"
+ "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
+ expect_str_eq(dump_out, template, DUMP_ERROR);
+
+ p = malloc(7);
+ call_dump();
+ records[0].size = 7;
+ records[0].usize = sz_s2u(7);
+ records[0].released = false;
+ confirm_record(template, records, 1);
+
+ q = mallocx(17, MALLOCX_ALIGN(128));
+ call_dump();
+ records[1].size = 17;
+ records[1].usize = sz_sa2u(17, 128);
+ records[1].released = false;
+ confirm_record(template, records, 2);
+
+ free(q);
+ call_dump();
+ records[1].released = true;
+ confirm_record(template, records, 2);
+
+ free(p);
+ call_dump();
+ records[0].released = true;
+ confirm_record(template, records, 2);
+
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef DUMP_ERROR
+#undef DUMP_OUT_SIZE
+
+#define N_THREADS 8
+#define N_PTRS 512
+#define N_CTLS 8
+#define N_ITERS 2048
+#define STRESS_ALLOC_MAX 4096
+
+typedef struct {
+ thd_t thd;
+ size_t id;
+ void *ptrs[N_PTRS];
+ size_t count;
+} thd_data_t;
+
+static thd_data_t thd_data[N_THREADS];
+static ssize_t test_max;
+
+static void
+test_write_cb(void *cbopaque, const char *str) {
+ sleep_ns(1000 * 1000);
+}
+
+static void *
+f_thread(void *arg) {
+ const size_t thd_id = *(size_t *)arg;
+ thd_data_t *data_p = thd_data + thd_id;
+ assert(data_p->id == thd_id);
+ data_p->count = 0;
+ uint64_t rand = (uint64_t)thd_id;
+ tsd_t *tsd = tsd_fetch();
+ assert(test_max > 1);
+ ssize_t last_max = -1;
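+	/*
+	 * Each iteration randomly frees a tracked pointer, allocates a new
+	 * one, dumps the recent-alloc list, or reads/writes the recent-alloc
+	 * limit, so the list is exercised under concurrency.
+	 */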
+ for (int i = 0; i < N_ITERS; i++) {
+ rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
+ assert(data_p->count <= N_PTRS);
+ if (rand < data_p->count) {
+ assert(data_p->count > 0);
+ if (rand != data_p->count - 1) {
+ assert(data_p->count > 1);
+ void *temp = data_p->ptrs[rand];
+ data_p->ptrs[rand] =
+ data_p->ptrs[data_p->count - 1];
+ data_p->ptrs[data_p->count - 1] = temp;
+ }
+ free(data_p->ptrs[--data_p->count]);
+ } else if (rand < N_PTRS) {
+ assert(data_p->count < N_PTRS);
+ data_p->ptrs[data_p->count++] = malloc(1);
+ } else if (rand % 5 == 0) {
+ prof_recent_alloc_dump(tsd, test_write_cb, NULL);
+ } else if (rand % 5 == 1) {
+ last_max = prof_recent_alloc_max_ctl_read();
+ } else if (rand % 5 == 2) {
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
+ } else if (rand % 5 == 3) {
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max);
+ } else {
+ assert(rand % 5 == 4);
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
+ }
+ assert_zd_ge(last_max, -1, "Illegal last-N max");
+ }
+
+ while (data_p->count > 0) {
+ free(data_p->ptrs[--data_p->count]);
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_recent_stress) {
+ test_skip_if(!config_prof);
+
+ confirm_prof_setup();
+
+ test_max = OPT_ALLOC_MAX;
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ data_p->id = i;
+ thd_create(&data_p->thd, &f_thread, &data_p->id);
+ }
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ thd_join(data_p->thd, NULL);
+ }
+
+ test_max = STRESS_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ data_p->id = i;
+ thd_create(&data_p->thd, &f_thread, &data_p->id);
+ }
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ thd_join(data_p->thd, NULL);
+ }
+
+ test_max = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef STRESS_ALLOC_MAX
+#undef N_ITERS
+#undef N_PTRS
+#undef N_THREADS
+
+int
+main(void) {
+ return test(
+ test_confirm_setup,
+ test_prof_recent_off,
+ test_prof_recent_on,
+ test_prof_recent_alloc,
+ test_prof_recent_alloc_dump,
+ test_prof_recent_stress);
+}
diff --git a/deps/jemalloc/test/unit/prof_recent.sh b/deps/jemalloc/test/unit/prof_recent.sh
new file mode 100644
index 0000000..58a54a4
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_recent.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_recent_alloc_max:3"
+fi
diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c
new file mode 100644
index 0000000..9b33b20
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_reset.c
@@ -0,0 +1,266 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+static void
+set_prof_active(bool active) {
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void) {
+ size_t ret;
+ size_t sz = sizeof(size_t);
+
+ expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ return ret;
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample_input) {
+ expect_d_eq(mallctl("prof.reset", NULL, NULL,
+ (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
+ "Expected profile sample rate change");
+}
+
+TEST_BEGIN(test_prof_reset_basic) {
+ size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
+ size_t sz;
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
+ &sz, NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ expect_zu_eq(lg_prof_sample_orig, 0,
+ "Unexpected profiling sample rate");
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+
+ /* Test simple resets. */
+ for (i = 0; i < 2; i++) {
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
+ "Unexpected profile sample rate change");
+ }
+
+ /* Test resets with prof.lg_sample changes. */
+ lg_prof_sample_next = 1;
+ for (i = 0; i < 2; i++) {
+ do_prof_reset(lg_prof_sample_next);
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
+ "Expected profile sample rate change");
+ lg_prof_sample_next = lg_prof_sample_orig;
+ }
+
+ /* Make sure the test code restored prof.lg_sample. */
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+}
+TEST_END
+
+TEST_BEGIN(test_prof_reset_cleanup) {
+ test_skip_if(!config_prof);
+
+ set_prof_active(true);
+
+ expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ void *p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_cnt_t cnt_all;
+ prof_cnt_all(&cnt_all);
+ expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");
+
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ prof_cnt_all(&cnt_all);
+ expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
+ expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ dallocx(p, 0);
+ expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+
+ set_prof_active(false);
+}
+TEST_END
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD (1U << 13)
+#define OBJ_RING_BUF_COUNT 1531
+#define RESET_INTERVAL (1U << 10)
+#define DUMP_INTERVAL 3677
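+/*
+ * Worker thread: allocate through a ring buffer while periodically resetting
+ * and dumping the heap profile, so resets race with live allocations.
+ */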
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ unsigned i;
+ void *objs[OBJ_RING_BUF_COUNT];
+
+ memset(objs, 0, sizeof(objs));
+
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ if (i % RESET_INTERVAL == 0) {
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while resetting heap profile "
+ "data");
+ }
+
+ if (i % DUMP_INTERVAL == 0) {
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
+ expect_ptr_not_null(*pp,
+ "Unexpected btalloc() failure");
+ }
+ }
+
+ /* Clean up any remaining objects. */
+ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_reset) {
+ size_t lg_prof_sample_orig;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+ size_t bt_count, tdata_count;
+
+ test_skip_if(!config_prof);
+
+ bt_count = prof_bt_count();
+ expect_zu_eq(bt_count, 0,
+ "Unexpected pre-existing tdata structures");
+ tdata_count = prof_tdata_count();
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ do_prof_reset(5);
+
+ set_prof_active(true);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+
+ expect_zu_eq(prof_bt_count(), bt_count,
+	    "Unexpected backtrace count change");
+ expect_zu_eq(prof_tdata_count(), tdata_count,
+ "Unexpected remaining tdata structures");
+
+ set_prof_active(false);
+
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx) {
+ size_t lg_prof_sample_orig;
+ unsigned i;
+ void *ptrs[NITER];
+
+ test_skip_if(!config_prof);
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ set_prof_active(true);
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ for (i = 0; i < NITER; i++) {
+ void *p;
+ size_t sz, nsz;
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Allocate small object (which will be promoted). */
+ p = ptrs[i] = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Perform successful xallocx(). */
+ sz = sallocx(p, 0);
+ expect_zu_eq(xallocx(p, sz, 0, 0), sz,
+ "Unexpected xallocx() failure");
+
+ /* Perform unsuccessful xallocx(). */
+ nsz = nallocx(sz+1, 0);
+ expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ "Unexpected xallocx() success");
+ }
+
+ for (i = 0; i < NITER; i++) {
+ /* dallocx. */
+ dallocx(ptrs[i], 0);
+ }
+
+ set_prof_active(false);
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
+int
+main(void) {
+ /* Intercept dumping prior to running any tests. */
+ prof_dump_open_file = prof_dump_open_file_intercept;
+
+ return test_no_reentrancy(
+ test_prof_reset_basic,
+ test_prof_reset_cleanup,
+ test_prof_reset,
+ test_xallocx);
+}
diff --git a/deps/jemalloc/test/unit/prof_reset.sh b/deps/jemalloc/test/unit/prof_reset.sh
new file mode 100644
index 0000000..daefeb7
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_reset.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0,prof_recent_alloc_max:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_stats.c b/deps/jemalloc/test/unit/prof_stats.c
new file mode 100644
index 0000000..c88c4ae
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_stats.c
@@ -0,0 +1,151 @@
+#include "test/jemalloc_test.h"
+
+#define N_PTRS 3
+
+static void
+test_combinations(szind_t ind, size_t sizes_array[N_PTRS],
+ int flags_array[N_PTRS]) {
+#define MALLCTL_STR_LEN 64
+ assert(opt_prof && opt_prof_stats);
+
+ char mallctl_live_str[MALLCTL_STR_LEN];
+ char mallctl_accum_str[MALLCTL_STR_LEN];
+ if (ind < SC_NBINS) {
+ malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
+ "prof.stats.bins.%u.live", (unsigned)ind);
+ malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
+ "prof.stats.bins.%u.accum", (unsigned)ind);
+ } else {
+ malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
+ "prof.stats.lextents.%u.live", (unsigned)(ind - SC_NBINS));
+ malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
+ "prof.stats.lextents.%u.accum", (unsigned)(ind - SC_NBINS));
+ }
+
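+	/*
+	 * Each stats read returns a pair: [0] is the sum of requested bytes
+	 * and [1] is the allocation count.  "live" counters drop on
+	 * deallocation, while "accum" counters only ever grow.
+	 */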
+ size_t stats_len = 2 * sizeof(uint64_t);
+
+ uint64_t live_stats_orig[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats_orig, &stats_len,
+ NULL, 0), 0, "");
+ uint64_t accum_stats_orig[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len,
+ NULL, 0), 0, "");
+
+ void *ptrs[N_PTRS];
+
+ uint64_t live_req_sum = 0;
+ uint64_t live_count = 0;
+ uint64_t accum_req_sum = 0;
+ uint64_t accum_count = 0;
+
+ for (size_t i = 0; i < N_PTRS; ++i) {
+ size_t sz = sizes_array[i];
+ int flags = flags_array[i];
+ void *p = mallocx(sz, flags);
+		assert_ptr_not_null(p, "mallocx() failed");
+ assert(TEST_MALLOC_SIZE(p) == sz_index2size(ind));
+ ptrs[i] = p;
+ live_req_sum += sz;
+ live_count++;
+ accum_req_sum += sz;
+ accum_count++;
+ uint64_t live_stats[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(live_stats[0] - live_stats_orig[0],
+ live_req_sum, "");
+ expect_u64_eq(live_stats[1] - live_stats_orig[1],
+ live_count, "");
+ uint64_t accum_stats[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
+ accum_req_sum, "");
+ expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
+ accum_count, "");
+ }
+
+ for (size_t i = 0; i < N_PTRS; ++i) {
+ size_t sz = sizes_array[i];
+ int flags = flags_array[i];
+ sdallocx(ptrs[i], sz, flags);
+ live_req_sum -= sz;
+ live_count--;
+ uint64_t live_stats[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(live_stats[0] - live_stats_orig[0],
+ live_req_sum, "");
+ expect_u64_eq(live_stats[1] - live_stats_orig[1],
+ live_count, "");
+ uint64_t accum_stats[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
+ accum_req_sum, "");
+ expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
+ accum_count, "");
+ }
+#undef MALLCTL_STR_LEN
+}
+
+static void
+test_szind_wrapper(szind_t ind) {
+ size_t sizes_array[N_PTRS];
+ int flags_array[N_PTRS];
+ for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
+ ++i, ++sz) {
+ sizes_array[i] = sz;
+ flags_array[i] = 0;
+ }
+ test_combinations(ind, sizes_array, flags_array);
+}
+
+TEST_BEGIN(test_prof_stats) {
+ test_skip_if(!config_prof);
+ test_szind_wrapper(0);
+ test_szind_wrapper(1);
+ test_szind_wrapper(2);
+ test_szind_wrapper(SC_NBINS);
+ test_szind_wrapper(SC_NBINS + 1);
+ test_szind_wrapper(SC_NBINS + 2);
+}
+TEST_END
+
+static void
+test_szind_aligned_wrapper(szind_t ind, unsigned lg_align) {
+ size_t sizes_array[N_PTRS];
+ int flags_array[N_PTRS];
+ int flags = MALLOCX_LG_ALIGN(lg_align);
+ for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
+ ++i, ++sz) {
+ sizes_array[i] = sz;
+ flags_array[i] = flags;
+ }
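+	/*
+	 * The aligned request may land in a larger size class; recompute the
+	 * expected index from the size/alignment pair.
+	 */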
+ test_combinations(
+ sz_size2index(sz_sa2u(sz_index2size(ind), 1 << lg_align)),
+ sizes_array, flags_array);
+}
+
+TEST_BEGIN(test_prof_stats_aligned) {
+ test_skip_if(!config_prof);
+ for (szind_t ind = 0; ind < 10; ++ind) {
+ for (unsigned lg_align = 0; lg_align < 10; ++lg_align) {
+ test_szind_aligned_wrapper(ind, lg_align);
+ }
+ }
+ for (szind_t ind = SC_NBINS - 5; ind < SC_NBINS + 5; ++ind) {
+ for (unsigned lg_align = SC_LG_LARGE_MINCLASS - 5;
+ lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) {
+ test_szind_aligned_wrapper(ind, lg_align);
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_stats,
+ test_prof_stats_aligned);
+}
diff --git a/deps/jemalloc/test/unit/prof_stats.sh b/deps/jemalloc/test/unit/prof_stats.sh
new file mode 100644
index 0000000..f3c819b
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_stats.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_stats:true"
+fi
diff --git a/deps/jemalloc/test/unit/prof_sys_thread_name.c b/deps/jemalloc/test/unit/prof_sys_thread_name.c
new file mode 100644
index 0000000..affc788
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_sys_thread_name.c
@@ -0,0 +1,77 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+static const char *test_thread_name = "test_name";
+
+static int
+test_prof_sys_thread_name_read_error(char *buf, size_t limit) {
+ return ENOSYS;
+}
+
+static int
+test_prof_sys_thread_name_read(char *buf, size_t limit) {
+ assert(strlen(test_thread_name) < limit);
+ strncpy(buf, test_thread_name, limit);
+ return 0;
+}
+
+static int
+test_prof_sys_thread_name_read_clear(char *buf, size_t limit) {
+ assert(limit > 0);
+ buf[0] = '\0';
+ return 0;
+}
+
+TEST_BEGIN(test_prof_sys_thread_name) {
+ test_skip_if(!config_prof);
+
+ bool oldval;
+ size_t sz = sizeof(oldval);
+ assert_d_eq(mallctl("opt.prof_sys_thread_name", &oldval, &sz, NULL, 0),
+ 0, "mallctl failed");
+ assert_true(oldval, "option was not set correctly");
+
+ const char *thread_name;
+ sz = sizeof(thread_name);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ expect_str_eq(thread_name, "", "Initial thread name should be empty");
+
+ thread_name = test_thread_name;
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sz),
+ ENOENT, "mallctl write for thread name should fail");
+ assert_ptr_eq(thread_name, test_thread_name,
+ "Thread name should not be touched");
+
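+	/*
+	 * The thread name is re-read from the system when an allocation is
+	 * sampled, so swap in a fake reader and then allocate to exercise it.
+	 */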
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read_error;
+ void *p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ assert_str_eq(thread_name, "",
+ "Thread name should stay the same if the system call fails");
+
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read;
+ p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ assert_str_eq(thread_name, test_thread_name,
+ "Thread name should be changed if the system call succeeds");
+
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read_clear;
+ p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ expect_str_eq(thread_name, "", "Thread name should be updated if the "
+ "system call returns a different name");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_sys_thread_name);
+}
diff --git a/deps/jemalloc/test/unit/prof_sys_thread_name.sh b/deps/jemalloc/test/unit/prof_sys_thread_name.sh
new file mode 100644
index 0000000..1f02a8a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_sys_thread_name.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_sys_thread_name:true"
+fi
diff --git a/deps/jemalloc/test/unit/prof_tctx.c b/deps/jemalloc/test/unit/prof_tctx.c
new file mode 100644
index 0000000..e0efdc3
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_tctx.c
@@ -0,0 +1,48 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_data.h"
+
+TEST_BEGIN(test_prof_realloc) {
+ tsd_t *tsd;
+ int flags;
+ void *p, *q;
+ prof_info_t prof_info_p, prof_info_q;
+ prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3;
+
+ test_skip_if(!config_prof);
+
+ tsd = tsd_fetch();
+ flags = MALLOCX_TCACHE_NONE;
+
+ prof_cnt_all(&cnt_0);
+ p = mallocx(1024, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ prof_info_get(tsd, p, NULL, &prof_info_p);
+ expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
+ "Expected valid tctx");
+ prof_cnt_all(&cnt_1);
+ expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs,
+ "Allocation should have increased sample size");
+
+ q = rallocx(p, 2048, flags);
+ expect_ptr_ne(p, q, "Expected move");
+	expect_ptr_not_null(q, "Unexpected rallocx() failure");
+ prof_info_get(tsd, q, NULL, &prof_info_q);
+ expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
+ "Expected valid tctx");
+ prof_cnt_all(&cnt_2);
+ expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs,
+ "Reallocation should not have changed sample size");
+
+ dallocx(q, flags);
+ prof_cnt_all(&cnt_3);
+ expect_u64_eq(cnt_0.curobjs, cnt_3.curobjs,
+ "Sample size should have returned to base level");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_prof_realloc);
+}
diff --git a/deps/jemalloc/test/unit/prof_tctx.sh b/deps/jemalloc/test/unit/prof_tctx.sh
new file mode 100644
index 0000000..485f9bf
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_tctx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c
new file mode 100644
index 0000000..3c4614f
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_thread_name.c
@@ -0,0 +1,122 @@
+#include "test/jemalloc_test.h"
+
+static void
+mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
+ int line) {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
+ NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ expect_str_eq(thread_name_old, thread_name_expected,
+ "%s():%d: Unexpected thread.prof.name value", func, line);
+}
+#define mallctl_thread_name_get(a) \
+ mallctl_thread_name_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_name_set_impl(const char *thread_name, const char *func,
+ int line) {
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), 0,
+ "%s():%d: Unexpected mallctl failure writing thread.prof.name",
+ func, line);
+ mallctl_thread_name_get_impl(thread_name, func, line);
+}
+#define mallctl_thread_name_set(a) \
+ mallctl_thread_name_set_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_thread_name_validation) {
+ const char *thread_name;
+
+ test_skip_if(!config_prof);
+ test_skip_if(opt_prof_sys_thread_name);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set("hi there");
+
+ /* NULL input shouldn't be allowed. */
+ thread_name = NULL;
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* '\n' shouldn't be allowed. */
+ thread_name = "hi\nthere";
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* Simultaneous read/write shouldn't be allowed. */
+ {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ expect_d_eq(mallctl("thread.prof.name",
+ (void *)&thread_name_old, &sz, (void *)&thread_name,
+ sizeof(thread_name)), EPERM,
+ "Unexpected mallctl result writing \"%s\" to "
+ "thread.prof.name", thread_name);
+ }
+
+ mallctl_thread_name_set("");
+}
+TEST_END
+
+#define NTHREADS 4
+#define NRESET 25
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ char thread_name[16] = "";
+ unsigned i;
+
+ malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set(thread_name);
+
+ for (i = 0; i < NRESET; i++) {
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ mallctl_thread_name_get(thread_name);
+ }
+
+ mallctl_thread_name_set(thread_name);
+ mallctl_thread_name_set("");
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_thread_name_threaded) {
+ test_skip_if(!config_prof);
+ test_skip_if(opt_prof_sys_thread_name);
+
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+#undef NTHREADS
+#undef NRESET
+
+int
+main(void) {
+ return test(
+ test_prof_thread_name_validation,
+ test_prof_thread_name_threaded);
+}
diff --git a/deps/jemalloc/test/unit/prof_thread_name.sh b/deps/jemalloc/test/unit/prof_thread_name.sh
new file mode 100644
index 0000000..298c105
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_thread_name.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false"
+fi
diff --git a/deps/jemalloc/test/unit/psset.c b/deps/jemalloc/test/unit/psset.c
new file mode 100644
index 0000000..6ff7201
--- /dev/null
+++ b/deps/jemalloc/test/unit/psset.c
@@ -0,0 +1,748 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/psset.h"
+
+#define PAGESLAB_ADDR ((void *)(1234 * HUGEPAGE))
+#define PAGESLAB_AGE 5678
+
+#define ALLOC_ARENA_IND 111
+#define ALLOC_ESN 222
+
+static void
+edata_init_test(edata_t *edata) {
+ memset(edata, 0, sizeof(*edata));
+ edata_arena_ind_set(edata, ALLOC_ARENA_IND);
+ edata_esn_set(edata, ALLOC_ESN);
+}
+
+static void
+test_psset_fake_purge(hpdata_t *ps) {
+ hpdata_purge_state_t purge_state;
+ hpdata_alloc_allowed_set(ps, false);
+ hpdata_purge_begin(ps, &purge_state);
+ void *addr;
+ size_t size;
+ while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
+ }
+ hpdata_purge_end(ps, &purge_state);
+ hpdata_alloc_allowed_set(ps, true);
+}
+
+static void
+test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
+ size_t size) {
+ hpdata_assert_empty(ps);
+
+ test_psset_fake_purge(ps);
+
+ psset_insert(psset, ps);
+ psset_update_begin(psset, ps);
+
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
+ /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(r_edata, ps);
+ psset_update_end(psset, ps);
+}
+
+static bool
+test_psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
+ hpdata_t *ps = psset_pick_alloc(psset, size);
+ if (ps == NULL) {
+ return true;
+ }
+ psset_update_begin(psset, ps);
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
+ /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(r_edata, ps);
+ psset_update_end(psset, ps);
+ return false;
+}
+
+static hpdata_t *
+test_psset_dalloc(psset_t *psset, edata_t *edata) {
+ hpdata_t *ps = edata_ps_get(edata);
+ psset_update_begin(psset, ps);
+ hpdata_unreserve(ps, edata_addr_get(edata), edata_size_get(edata));
+ psset_update_end(psset, ps);
+ if (hpdata_empty(ps)) {
+ psset_remove(psset, ps);
+ return ps;
+ } else {
+ return NULL;
+ }
+}
+
+static void
+edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
+ /*
+ * Note that allocations should get the arena ind of their home
+ * arena, *not* the arena ind of the pageslab allocator.
+ */
+ expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata),
+ "Arena ind changed");
+ expect_ptr_eq(
+ (void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)),
+ edata_addr_get(edata), "Didn't allocate in order");
+ expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), "");
+ expect_false(edata_slab_get(edata), "");
+ expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata),
+ "");
+ expect_u64_eq(0, edata_sn_get(edata), "");
+ expect_d_eq(edata_state_get(edata), extent_state_active, "");
+ expect_false(edata_zeroed_get(edata), "");
+ expect_true(edata_committed_get(edata), "");
+ expect_d_eq(EXTENT_PAI_HPA, edata_pai_get(edata), "");
+ expect_false(edata_is_head_get(edata), "");
+}
+
+TEST_BEGIN(test_empty) {
+ bool err;
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc;
+ edata_init_test(&alloc);
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* An empty psset should fail all allocations. */
+ err = test_psset_alloc_reuse(&psset, &alloc, PAGE);
+ expect_true(err, "Empty psset succeeded in an allocation.");
+}
+TEST_END
+
+TEST_BEGIN(test_fill) {
+ bool err;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ edata_t *edata = &alloc[i];
+ edata_expect(edata, i, 1);
+ }
+
+ /* The pageslab, and thus psset, should now have no allocations. */
+ edata_t extra_alloc;
+ edata_init_test(&extra_alloc);
+ err = test_psset_alloc_reuse(&psset, &extra_alloc, PAGE);
+ expect_true(err, "Alloc succeeded even though psset should be empty");
+}
+TEST_END
+
+TEST_BEGIN(test_reuse) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+
+ /* Free odd indices. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i ++) {
+ if (i % 2 == 0) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /* Realloc into them. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 2 == 0) {
+ continue;
+ }
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ edata_expect(&alloc[i], i, 1);
+ }
+ /* Now, free the pages at indices 0 or 1 mod 4. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 > 1) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /* And realloc 2-page allocations into them. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 != 0) {
+ continue;
+ }
+ err = test_psset_alloc_reuse(&psset, &alloc[i], 2 * PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ edata_expect(&alloc[i], i, 2);
+ }
+ /* Free all the 2-page allocations. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 != 0) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /*
+ * Free up a 1-page hole next to a 2-page hole, but somewhere in the
+ * middle of the pageslab. Index 11 should be right before such a hole
+ * (since 12 % 4 == 0).
+ */
+ size_t index_of_3 = 11;
+ ps = test_psset_dalloc(&psset, &alloc[index_of_3]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ err = test_psset_alloc_reuse(&psset, &alloc[index_of_3], 3 * PAGE);
+ expect_false(err, "Should have been able to find alloc.");
+ edata_expect(&alloc[index_of_3], index_of_3, 3);
+
+ /*
+ * Free up a 4-page hole at the end. Recall that the pages at offsets 0
+ * and 1 mod 4 were freed above, so we just have to free the last
+ * allocations.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 2]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+
+ /* Make sure we can satisfy an allocation at the very end of a slab. */
+ size_t index_of_4 = HUGEPAGE_PAGES - 4;
+ err = test_psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
+ expect_false(err, "Should have been able to find alloc.");
+ edata_expect(&alloc[index_of_4], index_of_4, 4);
+}
+TEST_END
+
+TEST_BEGIN(test_evict) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* Alloc the whole slab. */
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Unxpected allocation failure");
+ }
+
+ /* Dealloc the whole slab, going forwards. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES - 1; i++) {
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
+
+ err = test_psset_alloc_reuse(&psset, &alloc[0], PAGE);
+ expect_true(err, "psset should be empty.");
+}
+TEST_END
+
+TEST_BEGIN(test_multi_pageslab) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab[2];
+ hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE);
+ hpdata_init(&pageslab[1],
+ (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE),
+ PAGESLAB_AGE + 1);
+
+ edata_t alloc[2][HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* Insert both slabs. */
+ edata_init_test(&alloc[0][0]);
+ test_psset_alloc_new(&psset, &pageslab[0], &alloc[0][0], PAGE);
+ edata_init_test(&alloc[1][0]);
+ test_psset_alloc_new(&psset, &pageslab[1], &alloc[1][0], PAGE);
+
+ /* Fill them both up; make sure we do so in first-fit order. */
+ for (size_t i = 0; i < 2; i++) {
+ for (size_t j = 1; j < HUGEPAGE_PAGES; j++) {
+ edata_init_test(&alloc[i][j]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
+ expect_false(err,
+ "Nonempty psset failed page allocation.");
+ assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]),
+ "Didn't pick pageslabs in first-fit");
+ }
+ }
+
+ /*
+ * Free up a 2-page hole in the earlier slab, and a 1-page one in the
+ * later one. We should still pick the later one.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[0][0]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ ps = test_psset_dalloc(&psset, &alloc[0][1]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ ps = test_psset_dalloc(&psset, &alloc[1][0]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ err = test_psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
+ expect_ptr_eq(&pageslab[1], edata_ps_get(&alloc[0][0]),
+ "Should have picked the fuller pageslab");
+
+ /*
+ * Now both slabs have 1-page holes. Free up a second one in the later
+ * slab.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[1][1]);
+ expect_ptr_null(ps, "Unexpected eviction");
+
+ /*
+ * We should be able to allocate a 2-page object, even though an earlier
+ * size class is nonempty.
+ */
+ err = test_psset_alloc_reuse(&psset, &alloc[1][0], 2 * PAGE);
+ expect_false(err, "Allocation should have succeeded");
+}
+TEST_END
+
+static void
+stats_expect_empty(psset_bin_stats_t *stats) {
+ assert_zu_eq(0, stats->npageslabs,
+ "Supposedly empty bin had positive npageslabs");
+ expect_zu_eq(0, stats->nactive,
+ "Supposedly empty bin had positive nactive");
+}
+
+static void
+stats_expect(psset_t *psset, size_t nactive) {
+ if (nactive == HUGEPAGE_PAGES) {
+ expect_zu_eq(1, psset->stats.full_slabs[0].npageslabs,
+ "Expected a full slab");
+ expect_zu_eq(HUGEPAGE_PAGES,
+ psset->stats.full_slabs[0].nactive,
+ "Should have exactly filled the bin");
+ } else {
+ stats_expect_empty(&psset->stats.full_slabs[0]);
+ }
+ size_t ninactive = HUGEPAGE_PAGES - nactive;
+ pszind_t nonempty_pind = PSSET_NPSIZES;
+ if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) {
+ nonempty_pind = sz_psz2ind(sz_psz_quantize_floor(
+ ninactive << LG_PAGE));
+ }
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ if (i == nonempty_pind) {
+ assert_zu_eq(1,
+ psset->stats.nonfull_slabs[i][0].npageslabs,
+ "Should have found a slab");
+ expect_zu_eq(nactive,
+ psset->stats.nonfull_slabs[i][0].nactive,
+ "Mismatch in active pages");
+ } else {
+ stats_expect_empty(&psset->stats.nonfull_slabs[i][0]);
+ }
+ }
+ expect_zu_eq(nactive, psset_nactive(psset), "");
+}
+
+TEST_BEGIN(test_stats) {
+ bool err;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+ stats_expect(&psset, 0);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ stats_expect(&psset, i);
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+ stats_expect(&psset, HUGEPAGE_PAGES);
+ hpdata_t *ps;
+ for (ssize_t i = HUGEPAGE_PAGES - 1; i >= 0; i--) {
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_true((ps == NULL) == (i != 0),
+ "test_psset_dalloc should only evict a slab on the last "
+ "free");
+ stats_expect(&psset, i);
+ }
+
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ stats_expect(&psset, 1);
+ psset_update_begin(&psset, &pageslab);
+ stats_expect(&psset, 0);
+ psset_update_end(&psset, &pageslab);
+ stats_expect(&psset, 1);
+}
+TEST_END
+
+/*
+ * Fills in and inserts two pageslabs, with the first better than the second,
+ * and each fully allocated (into the allocations in allocs and worse_allocs,
+ * each of which should be HUGEPAGE_PAGES long), except for a single free page
+ * at the end.
+ *
+ * (There's nothing magic about these numbers; it's just useful to share the
+ * setup between the oldest fit and the insert/remove test).
+ */
+static void
+init_test_pageslabs(psset_t *psset, hpdata_t *pageslab,
+ hpdata_t *worse_pageslab, edata_t *alloc, edata_t *worse_alloc) {
+ bool err;
+
+ hpdata_init(pageslab, (void *)(10 * HUGEPAGE), PAGESLAB_AGE);
+ /*
+ * This pageslab would be better from an address-first-fit POV, but
+ * worse from an age POV.
+ */
+ hpdata_init(worse_pageslab, (void *)(9 * HUGEPAGE), PAGESLAB_AGE + 1);
+
+ psset_init(psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(psset, pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ expect_ptr_eq(pageslab, edata_ps_get(&alloc[i]),
+ "Allocated from the wrong pageslab");
+ }
+
+ edata_init_test(&worse_alloc[0]);
+ test_psset_alloc_new(psset, worse_pageslab, &worse_alloc[0], PAGE);
+ expect_ptr_eq(worse_pageslab, edata_ps_get(&worse_alloc[0]),
+ "Allocated from the wrong pageslab");
+ /*
+ * Make the two pssets otherwise indistinguishable; all full except for
+ * a single page.
+ */
+ for (size_t i = 1; i < HUGEPAGE_PAGES - 1; i++) {
+ edata_init_test(&worse_alloc[i]);
+ err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ expect_ptr_eq(worse_pageslab, edata_ps_get(&alloc[i]),
+ "Allocated from the wrong pageslab");
+ }
+
+ /* Deallocate the last page from the older pageslab. */
+ hpdata_t *evicted = test_psset_dalloc(psset,
+ &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(evicted, "Unexpected eviction");
+}
+
+TEST_BEGIN(test_oldest_fit) {
+ bool err;
+ edata_t alloc[HUGEPAGE_PAGES];
+ edata_t worse_alloc[HUGEPAGE_PAGES];
+
+ hpdata_t pageslab;
+ hpdata_t worse_pageslab;
+
+ psset_t psset;
+
+ init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
+ worse_alloc);
+
+ /* The edata should come from the better pageslab. */
+ edata_t test_edata;
+ edata_init_test(&test_edata);
+ err = test_psset_alloc_reuse(&psset, &test_edata, PAGE);
+ expect_false(err, "Nonempty psset failed page allocation");
+ expect_ptr_eq(&pageslab, edata_ps_get(&test_edata),
+ "Allocated from the wrong pageslab");
+}
+TEST_END
+
+TEST_BEGIN(test_insert_remove) {
+ bool err;
+ hpdata_t *ps;
+ edata_t alloc[HUGEPAGE_PAGES];
+ edata_t worse_alloc[HUGEPAGE_PAGES];
+
+ hpdata_t pageslab;
+ hpdata_t worse_pageslab;
+
+ psset_t psset;
+
+ init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
+ worse_alloc);
+
+ /* Remove better; should still be able to alloc from worse. */
+ psset_update_begin(&psset, &pageslab);
+ err = test_psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1],
+ PAGE);
+ expect_false(err, "Removal should still leave an empty page");
+ expect_ptr_eq(&worse_pageslab,
+ edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]),
+ "Allocated out of wrong ps");
+
+ /*
+ * After deallocating the previous alloc and reinserting better, it
+ * should be preferred for future allocations.
+ */
+ ps = test_psset_dalloc(&psset, &worse_alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Incorrect eviction of nonempty pageslab");
+ psset_update_end(&psset, &pageslab);
+ err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
+ expect_false(err, "psset should be nonempty");
+ expect_ptr_eq(&pageslab, edata_ps_get(&alloc[HUGEPAGE_PAGES - 1]),
+ "Removal/reinsertion shouldn't change ordering");
+ /*
+ * After deallocating and removing both, allocations should fail.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Incorrect eviction");
+ psset_update_begin(&psset, &pageslab);
+ psset_update_begin(&psset, &worse_pageslab);
+ err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
+ expect_true(err, "psset should be empty, but an alloc succeeded");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_nonhuge) {
+ /*
+ * All else being equal, we should prefer purging non-huge pages over
+ * huge ones for non-empty extents.
+ */
+
+ /* Nothing magic about this constant. */
+ enum {
+ NHP = 23,
+ };
+ hpdata_t *hpdata;
+
+ psset_t psset;
+ psset_init(&psset);
+
+ hpdata_t hpdata_huge[NHP];
+ uintptr_t huge_begin = (uintptr_t)&hpdata_huge[0];
+ uintptr_t huge_end = (uintptr_t)&hpdata_huge[NHP];
+ hpdata_t hpdata_nonhuge[NHP];
+ uintptr_t nonhuge_begin = (uintptr_t)&hpdata_nonhuge[0];
+ uintptr_t nonhuge_end = (uintptr_t)&hpdata_nonhuge[NHP];
+
+ for (size_t i = 0; i < NHP; i++) {
+ hpdata_init(&hpdata_huge[i], (void *)((10 + i) * HUGEPAGE),
+ 123 + i);
+ psset_insert(&psset, &hpdata_huge[i]);
+
+ hpdata_init(&hpdata_nonhuge[i],
+ (void *)((10 + NHP + i) * HUGEPAGE),
+ 456 + i);
+ psset_insert(&psset, &hpdata_nonhuge[i]);
+
+ }
+ for (int i = 0; i < 2 * NHP; i++) {
+ hpdata = psset_pick_alloc(&psset, HUGEPAGE * 3 / 4);
+ psset_update_begin(&psset, hpdata);
+ void *ptr;
+ ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE * 3 / 4);
+ /* Ignore the first alloc, which will stick around. */
+ (void)ptr;
+ /*
+ * The second alloc is to dirty the pages; free it immediately
+ * after allocating.
+ */
+ ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE / 4);
+ hpdata_unreserve(hpdata, ptr, HUGEPAGE / 4);
+
+ if (huge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < huge_end) {
+ hpdata_hugify(hpdata);
+ }
+
+ hpdata_purge_allowed_set(hpdata, true);
+ psset_update_end(&psset, hpdata);
+ }
+
+ /*
+ * We've got a bunch of 1/4-dirty hpdatas. It should give us all the
+ * non-huge ones to purge, then all the huge ones, then refuse to purge
+ * further.
+ */
+ for (int i = 0; i < NHP; i++) {
+ hpdata = psset_pick_purge(&psset);
+ assert_true(nonhuge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < nonhuge_end, "");
+ psset_update_begin(&psset, hpdata);
+ test_psset_fake_purge(hpdata);
+ hpdata_purge_allowed_set(hpdata, false);
+ psset_update_end(&psset, hpdata);
+ }
+ for (int i = 0; i < NHP; i++) {
+ hpdata = psset_pick_purge(&psset);
+ expect_true(huge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < huge_end, "");
+ psset_update_begin(&psset, hpdata);
+ hpdata_dehugify(hpdata);
+ test_psset_fake_purge(hpdata);
+ hpdata_purge_allowed_set(hpdata, false);
+ psset_update_end(&psset, hpdata);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_empty) {
+ void *ptr;
+
+ psset_t psset;
+ psset_init(&psset);
+
+ hpdata_t hpdata_empty;
+ hpdata_t hpdata_nonempty;
+ hpdata_init(&hpdata_empty, (void *)(10 * HUGEPAGE), 123);
+ psset_insert(&psset, &hpdata_empty);
+ hpdata_init(&hpdata_nonempty, (void *)(11 * HUGEPAGE), 456);
+ psset_insert(&psset, &hpdata_nonempty);
+
+ psset_update_begin(&psset, &hpdata_empty);
+ ptr = hpdata_reserve_alloc(&hpdata_empty, PAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_empty), ptr, "");
+ hpdata_unreserve(&hpdata_empty, ptr, PAGE);
+ hpdata_purge_allowed_set(&hpdata_empty, true);
+ psset_update_end(&psset, &hpdata_empty);
+
+ psset_update_begin(&psset, &hpdata_nonempty);
+ ptr = hpdata_reserve_alloc(&hpdata_nonempty, 10 * PAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_nonempty), ptr, "");
+ hpdata_unreserve(&hpdata_nonempty, ptr, 9 * PAGE);
+ hpdata_purge_allowed_set(&hpdata_nonempty, true);
+ psset_update_end(&psset, &hpdata_nonempty);
+
+ /*
+ * The nonempty slab has 9 dirty pages, while the empty one has only 1.
+ * We should still pick the empty one for purging.
+ */
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_empty, to_purge, "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_empty_huge) {
+ void *ptr;
+
+ psset_t psset;
+ psset_init(&psset);
+
+ enum { NHP = 10 };
+
+ hpdata_t hpdata_huge[NHP];
+ hpdata_t hpdata_nonhuge[NHP];
+
+ uintptr_t cur_addr = 100 * HUGEPAGE;
+ uint64_t cur_age = 123;
+ for (int i = 0; i < NHP; i++) {
+ hpdata_init(&hpdata_huge[i], (void *)cur_addr, cur_age);
+ cur_addr += HUGEPAGE;
+ cur_age++;
+ psset_insert(&psset, &hpdata_huge[i]);
+
+ hpdata_init(&hpdata_nonhuge[i], (void *)cur_addr, cur_age);
+ cur_addr += HUGEPAGE;
+ cur_age++;
+ psset_insert(&psset, &hpdata_nonhuge[i]);
+
+ /*
+ * Make the hpdata_huge[i] fully dirty, empty, purgable, and
+ * huge.
+ */
+ psset_update_begin(&psset, &hpdata_huge[i]);
+ ptr = hpdata_reserve_alloc(&hpdata_huge[i], HUGEPAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_huge[i]), ptr, "");
+ hpdata_hugify(&hpdata_huge[i]);
+ hpdata_unreserve(&hpdata_huge[i], ptr, HUGEPAGE);
+ hpdata_purge_allowed_set(&hpdata_huge[i], true);
+ psset_update_end(&psset, &hpdata_huge[i]);
+
+ /*
+ * Make hpdata_nonhuge[i] fully dirty, empty, purgable, and
+ * non-huge.
+ */
+ psset_update_begin(&psset, &hpdata_nonhuge[i]);
+ ptr = hpdata_reserve_alloc(&hpdata_nonhuge[i], HUGEPAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_nonhuge[i]), ptr, "");
+ hpdata_unreserve(&hpdata_nonhuge[i], ptr, HUGEPAGE);
+ hpdata_purge_allowed_set(&hpdata_nonhuge[i], true);
+ psset_update_end(&psset, &hpdata_nonhuge[i]);
+ }
+
+ /*
+ * We have a bunch of empty slabs, half huge, half nonhuge, inserted in
+ * alternating order. We should pop all the huge ones before popping
+ * any of the non-huge ones for purging.
+ */
+ for (int i = 0; i < NHP; i++) {
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_huge[i], to_purge, "");
+ psset_update_begin(&psset, to_purge);
+ hpdata_purge_allowed_set(to_purge, false);
+ psset_update_end(&psset, to_purge);
+ }
+ for (int i = 0; i < NHP; i++) {
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_nonhuge[i], to_purge, "");
+ psset_update_begin(&psset, to_purge);
+ hpdata_purge_allowed_set(to_purge, false);
+ psset_update_end(&psset, to_purge);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_empty,
+ test_fill,
+ test_reuse,
+ test_evict,
+ test_multi_pageslab,
+ test_stats,
+ test_oldest_fit,
+ test_insert_remove,
+ test_purge_prefers_nonhuge,
+ test_purge_prefers_empty,
+ test_purge_prefers_empty_huge);
+}
diff --git a/deps/jemalloc/test/unit/ql.c b/deps/jemalloc/test/unit/ql.c
new file mode 100644
index 0000000..f913058
--- /dev/null
+++ b/deps/jemalloc/test/unit/ql.c
@@ -0,0 +1,317 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ql.h"
+
+ /* Number of list entries, in [2..26]. */
+#define NENTRIES 9
+
+typedef struct list_s list_t;
+typedef ql_head(list_t) list_head_t;
+
+struct list_s {
+ ql_elm(list_t) link;
+ char id;
+};
+
+static void
+test_empty_list(list_head_t *head) {
+ list_t *t;
+ unsigned i;
+
+ expect_true(ql_empty(head), "Unexpected element for empty list");
+ expect_ptr_null(ql_first(head), "Unexpected element for empty list");
+ expect_ptr_null(ql_last(head, link),
+ "Unexpected element for empty list");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ i++;
+ }
+ expect_u_eq(i, 0, "Unexpected element for empty list");
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ i++;
+ }
+ expect_u_eq(i, 0, "Unexpected element for empty list");
+}
+
+TEST_BEGIN(test_ql_empty) {
+ list_head_t head;
+
+ ql_new(&head);
+ test_empty_list(&head);
+}
+TEST_END
+
+static void
+init_entries(list_t *entries, unsigned nentries) {
+ unsigned i;
+
+ for (i = 0; i < nentries; i++) {
+ entries[i].id = 'a' + i;
+ ql_elm_new(&entries[i], link);
+ }
+}
+
+static void
+test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
+ list_t *t;
+ unsigned i;
+
+ expect_false(ql_empty(head), "List should not be empty");
+ expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
+ expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
+ "Element id mismatch");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ expect_c_eq(t->id, entries[i].id, "Element id mismatch");
+ i++;
+ }
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ expect_c_eq(t->id, entries[nentries-i-1].id,
+ "Element id mismatch");
+ i++;
+ }
+
+ for (i = 0; i < nentries-1; i++) {
+ t = ql_next(head, &entries[i], link);
+ expect_c_eq(t->id, entries[i+1].id, "Element id mismatch");
+ }
+ expect_ptr_null(ql_next(head, &entries[nentries-1], link),
+ "Unexpected element");
+
+ expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
+ for (i = 1; i < nentries; i++) {
+ t = ql_prev(head, &entries[i], link);
+ expect_c_eq(t->id, entries[i-1].id, "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_ql_tail_insert) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_tail_remove) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, entries, NENTRIES-i);
+ ql_tail_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_insert) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_remove) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, &entries[i], NENTRIES-i);
+ ql_head_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_insert) {
+ list_head_t head;
+ list_t entries[8];
+ list_t *a, *b, *c, *d, *e, *f, *g, *h;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ a = &entries[0];
+ b = &entries[1];
+ c = &entries[2];
+ d = &entries[3];
+ e = &entries[4];
+ f = &entries[5];
+ g = &entries[6];
+ h = &entries[7];
+
+ /*
+ * ql_remove(), ql_before_insert(), and ql_after_insert() are used
+ * internally by other macros that are already tested, so there's no
+ * need to test them completely. However, insertion/deletion from the
+ * middle of lists is not otherwise tested; do so here.
+ */
+ ql_tail_insert(&head, f, link);
+ ql_before_insert(&head, f, b, link);
+ ql_before_insert(&head, f, c, link);
+ ql_after_insert(f, h, link);
+ ql_after_insert(f, g, link);
+ ql_before_insert(&head, b, a, link);
+ ql_after_insert(c, d, link);
+ ql_before_insert(&head, f, e, link);
+
+ test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
+}
+TEST_END
+
+static void
+test_concat_split_entries(list_t *entries, unsigned nentries_a,
+ unsigned nentries_b) {
+ init_entries(entries, nentries_a + nentries_b);
+
+ list_head_t head_a;
+ ql_new(&head_a);
+ for (unsigned i = 0; i < nentries_a; i++) {
+ ql_tail_insert(&head_a, &entries[i], link);
+ }
+ if (nentries_a == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a);
+ }
+
+ list_head_t head_b;
+ ql_new(&head_b);
+ for (unsigned i = 0; i < nentries_b; i++) {
+ ql_tail_insert(&head_b, &entries[nentries_a + i], link);
+ }
+ if (nentries_b == 0) {
+ test_empty_list(&head_b);
+ } else {
+ test_entries_list(&head_b, entries + nentries_a, nentries_b);
+ }
+
+ ql_concat(&head_a, &head_b, link);
+ if (nentries_a + nentries_b == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a + nentries_b);
+ }
+ test_empty_list(&head_b);
+
+ if (nentries_b == 0) {
+ return;
+ }
+
+ list_head_t head_c;
+ ql_split(&head_a, &entries[nentries_a], &head_c, link);
+ if (nentries_a == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a);
+ }
+ test_entries_list(&head_c, entries + nentries_a, nentries_b);
+}
+
+TEST_BEGIN(test_ql_concat_split) {
+ list_t entries[NENTRIES];
+
+ test_concat_split_entries(entries, 0, 0);
+
+ test_concat_split_entries(entries, 0, 1);
+ test_concat_split_entries(entries, 1, 0);
+
+ test_concat_split_entries(entries, 0, NENTRIES);
+ test_concat_split_entries(entries, 1, NENTRIES - 1);
+ test_concat_split_entries(entries, NENTRIES / 2,
+ NENTRIES - NENTRIES / 2);
+ test_concat_split_entries(entries, NENTRIES - 1, 1);
+ test_concat_split_entries(entries, NENTRIES, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_rotate) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ char head_id = ql_first(&head)->id;
+ for (i = 0; i < NENTRIES; i++) {
+ assert_c_eq(ql_first(&head)->id, head_id, "");
+ ql_rotate(&head, link);
+ assert_c_eq(ql_last(&head, link)->id, head_id, "");
+ head_id++;
+ }
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_move) {
+ list_head_t head_dest, head_src;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head_src);
+ ql_move(&head_dest, &head_src);
+ test_empty_list(&head_src);
+ test_empty_list(&head_dest);
+
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head_src, &entries[i], link);
+ }
+ ql_move(&head_dest, &head_src);
+ test_empty_list(&head_src);
+ test_entries_list(&head_dest, entries, NENTRIES);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ql_empty,
+ test_ql_tail_insert,
+ test_ql_tail_remove,
+ test_ql_head_insert,
+ test_ql_head_remove,
+ test_ql_insert,
+ test_ql_concat_split,
+ test_ql_rotate,
+ test_ql_move);
+}
diff --git a/deps/jemalloc/test/unit/qr.c b/deps/jemalloc/test/unit/qr.c
new file mode 100644
index 0000000..16eed0e
--- /dev/null
+++ b/deps/jemalloc/test/unit/qr.c
@@ -0,0 +1,243 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/qr.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+/* Split index, in [1..NENTRIES). */
+#define SPLIT_INDEX 5
+
+typedef struct ring_s ring_t;
+
+struct ring_s {
+ qr(ring_t) link;
+ char id;
+};
+
+static void
+init_entries(ring_t *entries) {
+ unsigned i;
+
+ for (i = 0; i < NENTRIES; i++) {
+ qr_new(&entries[i], link);
+ entries[i].id = 'a' + i;
+ }
+}
+
+static void
+test_independent_entries(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ j++;
+ }
+ expect_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ j++;
+ }
+ expect_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ expect_ptr_eq(t, &entries[i],
+ "Next element in single-element ring should be same as "
+ "current element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ expect_ptr_eq(t, &entries[i],
+ "Previous element in single-element ring should be same as "
+ "current element");
+ }
+}
+
+TEST_BEGIN(test_qr_one) {
+ ring_t entries[NENTRIES];
+
+ init_entries(entries);
+ test_independent_entries(entries);
+}
+TEST_END
+
+static void
+test_entries_ring(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[(i+j) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[(NENTRIES+i-j-1) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_qr_after_insert) {
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+ test_entries_ring(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_remove) {
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[i+j].id,
+ "Element id mismatch");
+ j++;
+ }
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[NENTRIES - 1 - j].id,
+ "Element id mismatch");
+ j++;
+ }
+ qr_remove(&entries[i], link);
+ }
+ test_independent_entries(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_before_insert) {
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_before_insert(&entries[i - 1], &entries[i], link);
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[(NENTRIES+i-j) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+TEST_END
+
+static void
+test_split_entries(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ if (i < SPLIT_INDEX) {
+ expect_c_eq(t->id,
+ entries[(i+j) % SPLIT_INDEX].id,
+ "Element id mismatch");
+ } else {
+ expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
+ (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
+ "Element id mismatch");
+ }
+ j++;
+ }
+ }
+}
+
+TEST_BEGIN(test_qr_meld_split) {
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_split(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_qr_one,
+ test_qr_after_insert,
+ test_qr_remove,
+ test_qr_before_insert,
+ test_qr_meld_split);
+}
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
new file mode 100644
index 0000000..827ec51
--- /dev/null
+++ b/deps/jemalloc/test/unit/rb.c
@@ -0,0 +1,1019 @@
+#include "test/jemalloc_test.h"
+
+#include <stdlib.h>
+
+#include "jemalloc/internal/rb.h"
+
+#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \
+ NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \
+ rbp_bh_t)) { \
+ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+static bool summarize_always_returns_true = false;
+
+typedef struct node_s node_t;
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ rb_node(node_t) link;
+ /* Order used by nodes. */
+ uint64_t key;
+ /*
+ * Our made-up summary property is "specialness", with summarization
+ * taking the max.
+ */
+ uint64_t specialness;
+
+ /*
+ * Used by some of the test randomization to avoid double-removing
+ * nodes.
+ */
+ bool mid_remove;
+
+ /*
+ * To test searching functionality, we want to temporarily weaken the
+ * ordering to allow non-equal nodes that nevertheless compare equal.
+ */
+ bool allow_duplicates;
+
+ /*
+ * In check_consistency, it's handy to know a node's rank in the tree;
+ * this tracks it (but only there; not all tests use this).
+ */
+ int rank;
+ int filtered_rank;
+
+ /*
+ * Replicate the internal structure of the tree, to make sure the
+ * implementation doesn't miss any updates.
+ */
+ const node_t *summary_lchild;
+ const node_t *summary_rchild;
+ uint64_t summary_max_specialness;
+};
+
+static int
+node_cmp(const node_t *a, const node_t *b) {
+ int ret;
+
+ expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0 && !a->allow_duplicates) {
+ /*
+ * Duplicates are not allowed in the tree, so force an
+ * arbitrary ordering for non-identical items with equal keys,
+ * unless the user is searching and wants to allow the
+ * duplicate.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return ret;
+}
+
+static uint64_t
+node_subtree_specialness(node_t *n, const node_t *lchild,
+ const node_t *rchild) {
+ uint64_t subtree_specialness = n->specialness;
+ if (lchild != NULL
+ && lchild->summary_max_specialness > subtree_specialness) {
+ subtree_specialness = lchild->summary_max_specialness;
+ }
+ if (rchild != NULL
+ && rchild->summary_max_specialness > subtree_specialness) {
+ subtree_specialness = rchild->summary_max_specialness;
+ }
+ return subtree_specialness;
+}
+
+static bool
+node_summarize(node_t *a, const node_t *lchild, const node_t *rchild) {
+ uint64_t new_summary_max_specialness = node_subtree_specialness(
+ a, lchild, rchild);
+ bool changed = (a->summary_lchild != lchild)
+ || (a->summary_rchild != rchild)
+ || (new_summary_max_specialness != a->summary_max_specialness);
+ a->summary_max_specialness = new_summary_max_specialness;
+ a->summary_lchild = lchild;
+ a->summary_rchild = rchild;
+ return changed || summarize_always_returns_true;
+}
+
+typedef rb_tree(node_t) tree_t;
+rb_summarized_proto(static, tree_, tree_t, node_t);
+rb_summarized_gen(static, tree_, tree_t, node_t, link, node_cmp,
+ node_summarize);
+
+static bool
+specialness_filter_node(void *ctx, node_t *node) {
+ uint64_t specialness = *(uint64_t *)ctx;
+ return node->specialness >= specialness;
+}
+
+static bool
+specialness_filter_subtree(void *ctx, node_t *node) {
+ uint64_t specialness = *(uint64_t *)ctx;
+ return node->summary_max_specialness >= specialness;
+}
+
+static node_t *
+tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *i = (unsigned *)data;
+ node_t *search_node;
+
+ expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Test rb_search(). */
+ search_node = tree_search(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_search() returned unexpected node");
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_nsearch() returned unexpected node");
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_psearch() returned unexpected node");
+
+ (*i)++;
+
+ return NULL;
+}
+
+TEST_BEGIN(test_rb_empty) {
+ tree_t tree;
+ node_t key;
+
+ tree_new(&tree);
+
+ expect_true(tree_empty(&tree), "Tree should be empty");
+ expect_ptr_null(tree_first(&tree), "Unexpected node");
+ expect_ptr_null(tree_last(&tree), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ expect_ptr_null(tree_search(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ expect_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+
+ unsigned nodes = 0;
+ tree_iter_filtered(&tree, NULL, &tree_iterate_cb,
+ &nodes, &specialness_filter_node, &specialness_filter_subtree,
+ NULL);
+ expect_u_eq(0, nodes, "");
+
+ nodes = 0;
+ tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb,
+ &nodes, &specialness_filter_node, &specialness_filter_subtree,
+ NULL);
+ expect_u_eq(0, nodes, "");
+
+ expect_ptr_null(tree_first_filtered(&tree, &specialness_filter_node,
+ &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_last_filtered(&tree, &specialness_filter_node,
+ &specialness_filter_subtree, NULL), "");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ expect_ptr_null(tree_search_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_nsearch_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_psearch_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
+}
+TEST_END
+
+static unsigned
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
+ unsigned ret = 0;
+ node_t *left_node;
+ node_t *right_node;
+
+ if (node == NULL) {
+ return ret;
+ }
+
+ left_node = rbtn_left_get(node_t, link, node);
+ right_node = rbtn_right_get(node_t, link, node);
+
+ expect_ptr_eq(left_node, node->summary_lchild,
+ "summary missed a tree update");
+ expect_ptr_eq(right_node, node->summary_rchild,
+ "summary missed a tree update");
+
+ uint64_t expected_subtree_specialness = node_subtree_specialness(node,
+ left_node, right_node);
+ expect_u64_eq(expected_subtree_specialness,
+ node->summary_max_specialness, "Incorrect summary");
+
+ if (!rbtn_red_get(node_t, link, node)) {
+ black_depth++;
+ }
+
+ /* Red nodes must be interleaved with black nodes. */
+ if (rbtn_red_get(node_t, link, node)) {
+ if (left_node != NULL) {
+ expect_false(rbtn_red_get(node_t, link, left_node),
+ "Node should be black");
+ }
+ if (right_node != NULL) {
+ expect_false(rbtn_red_get(node_t, link, right_node),
+ "Node should be black");
+ }
+ }
+
+ /* Self. */
+ expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Left subtree. */
+ if (left_node != NULL) {
+ ret += tree_recurse(left_node, black_height, black_depth);
+ } else {
+ ret += (black_depth != black_height);
+ }
+
+ /* Right subtree. */
+ if (right_node != NULL) {
+ ret += tree_recurse(right_node, black_height, black_depth);
+ } else {
+ ret += (black_depth != black_height);
+ }
+
+ return ret;
+}
+
+static unsigned
+tree_iterate(tree_t *tree) {
+ unsigned i;
+
+ i = 0;
+ tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return i;
+}
+
+static unsigned
+tree_iterate_reverse(tree_t *tree) {
+ unsigned i;
+
+ i = 0;
+ tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return i;
+}
+
+static void
+node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
+ node_t *search_node;
+ unsigned black_height, imbalances;
+
+ tree_remove(tree, node);
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ if (search_node != NULL) {
+ expect_u64_ge(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ if (search_node != NULL) {
+ expect_u64_le(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ node->magic = 0;
+
+ rbtn_black_height(node_t, link, tree, black_height);
+ imbalances = tree_recurse(tree->rbt_root, black_height, 0);
+ expect_u_eq(imbalances, 0, "Tree is unbalanced");
+ expect_u_eq(tree_iterate(tree), nnodes-1,
+ "Unexpected node iteration count");
+ expect_u_eq(tree_iterate_reverse(tree), nnodes-1,
+ "Unexpected node iteration count");
+}
+
+static node_t *
+remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_next(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return ret;
+}
+
+static node_t *
+remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_prev(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return ret;
+}
+
+static void
+destroy_cb(node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+
+ expect_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+ (*nnodes)--;
+}
+
+TEST_BEGIN(test_rb_random) {
+ enum {
+ NNODES = 25,
+ NBAGS = 500,
+ SEED = 42
+ };
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ tree_t tree;
+ node_t nodes[NNODES];
+ unsigned i, j, k, black_height, imbalances;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = j;
+ }
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = NNODES - j - 1;
+ }
+ break;
+ default:
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+ }
+
+ /*
+ * We alternate test behavior with a period of 2 here, and a
+ * period of 5 down below, so there's no cycle in which certain
+ * combinations get omitted.
+ */
+ summarize_always_returns_true = (i % 2 == 0);
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize tree and nodes. */
+ tree_new(&tree);
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ nodes[k].specialness = gen_rand64_range(sfmt,
+ NNODES);
+ nodes[k].mid_remove = false;
+ nodes[k].allow_duplicates = false;
+ nodes[k].summary_lchild = NULL;
+ nodes[k].summary_rchild = NULL;
+ nodes[k].summary_max_specialness = 0;
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ tree_insert(&tree, &nodes[k]);
+
+ rbtn_black_height(node_t, link, &tree,
+ black_height);
+ imbalances = tree_recurse(tree.rbt_root,
+ black_height, 0);
+ expect_u_eq(imbalances, 0,
+ "Tree is unbalanced");
+
+ expect_u_eq(tree_iterate(&tree), k+1,
+ "Unexpected node iteration count");
+ expect_u_eq(tree_iterate_reverse(&tree), k+1,
+ "Unexpected node iteration count");
+
+ expect_false(tree_empty(&tree),
+ "Tree should not be empty");
+ expect_ptr_not_null(tree_first(&tree),
+ "Tree should not be empty");
+ expect_ptr_not_null(tree_last(&tree),
+ "Tree should not be empty");
+
+ tree_next(&tree, &nodes[k]);
+ tree_prev(&tree, &nodes[k]);
+ }
+
+ /* Remove nodes. */
+ switch (i % 5) {
+ case 0:
+ for (k = 0; k < j; k++) {
+ node_remove(&tree, &nodes[k], j - k);
+ }
+ break;
+ case 1:
+ for (k = j; k > 0; k--) {
+ node_remove(&tree, &nodes[k-1], k);
+ }
+ break;
+ case 2: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_iter(&tree, start,
+ remove_iterate_cb, (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ expect_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 3: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_reverse_iter(&tree, start,
+ remove_reverse_iterate_cb,
+ (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ expect_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 4: {
+ unsigned nnodes = j;
+ tree_destroy(&tree, destroy_cb, &nnodes);
+ expect_u_eq(nnodes, 0,
+ "Destruction terminated early");
+ break;
+ } default:
+ not_reached();
+ }
+ }
+ }
+ fini_gen_rand(sfmt);
+}
+TEST_END
+
+static void
+expect_simple_consistency(tree_t *tree, uint64_t specialness,
+ bool expected_empty, node_t *expected_first, node_t *expected_last) {
+ bool empty;
+ node_t *first;
+ node_t *last;
+
+ empty = tree_empty_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_b_eq(expected_empty, empty, "");
+
+ first = tree_first_filtered(tree,
+ &specialness_filter_node, &specialness_filter_subtree,
+ (void *)&specialness);
+ expect_ptr_eq(expected_first, first, "");
+
+ last = tree_last_filtered(tree,
+ &specialness_filter_node, &specialness_filter_subtree,
+ (void *)&specialness);
+ expect_ptr_eq(expected_last, last, "");
+}
+
+TEST_BEGIN(test_rb_filter_simple) {
+ enum {FILTER_NODES = 10};
+ node_t nodes[FILTER_NODES];
+ for (unsigned i = 0; i < FILTER_NODES; i++) {
+ nodes[i].magic = NODE_MAGIC;
+ nodes[i].key = i;
+ if (i == 0) {
+ nodes[i].specialness = 0;
+ } else {
+ nodes[i].specialness = ffs_u(i);
+ }
+ nodes[i].mid_remove = false;
+ nodes[i].allow_duplicates = false;
+ nodes[i].summary_lchild = NULL;
+ nodes[i].summary_rchild = NULL;
+ nodes[i].summary_max_specialness = 0;
+ }
+
+ summarize_always_returns_true = false;
+
+ tree_t tree;
+ tree_new(&tree);
+
+ /* Should be empty */
+ expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+
+ /* Fill in just the odd nodes. */
+ for (int i = 1; i < FILTER_NODES; i += 2) {
+ tree_insert(&tree, &nodes[i]);
+ }
+
+ /* A search for an odd node should succeed. */
+ expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ false,
+ /* first */ &nodes[1], /* last */ &nodes[9]);
+
+ /* But a search for an even one should fail. */
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+
+ /* Now we add an even. */
+ tree_insert(&tree, &nodes[4]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[4]);
+
+ /* A smaller even, and a larger even. */
+ tree_insert(&tree, &nodes[2]);
+ tree_insert(&tree, &nodes[8]);
+
+ /*
+ * A first-search (resp. last-search) for an even should switch to the
+ * lower (higher) one, now that it's been added.
+ */
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[2], /* last */ &nodes[8]);
+
+ /*
+ * If we remove 2, a first-search should go back to 4, while a
+ * last-search should remain unchanged.
+ */
+ tree_remove(&tree, &nodes[2]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[8]);
+
+ /* Reinsert 2, then find it again. */
+ tree_insert(&tree, &nodes[2]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[2], /* last */ &nodes[8]);
+
+ /* Searching for a multiple of 4 should not have changed. */
+ expect_simple_consistency(&tree, /* specialness */ 2, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[8]);
+
+ /* And a multiple of 8 */
+ expect_simple_consistency(&tree, /* specialness */ 3, /* empty */ false,
+ /* first */ &nodes[8], /* last */ &nodes[8]);
+
+ /* But not a multiple of 16 */
+ expect_simple_consistency(&tree, /* specialness */ 4, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+}
+TEST_END
+
+typedef struct iter_ctx_s iter_ctx_t;
+struct iter_ctx_s {
+ int ncalls;
+ node_t *last_node;
+
+ int ncalls_max;
+ bool forward;
+};
+
+static node_t *
+tree_iterate_filtered_cb(tree_t *tree, node_t *node, void *arg) {
+ iter_ctx_t *ctx = (iter_ctx_t *)arg;
+ ctx->ncalls++;
+ expect_u64_ge(node->specialness, 1,
+ "Should only invoke cb on nodes that pass the filter");
+ if (ctx->last_node != NULL) {
+ if (ctx->forward) {
+ expect_d_lt(node_cmp(ctx->last_node, node), 0,
+ "Incorrect iteration order");
+ } else {
+ expect_d_gt(node_cmp(ctx->last_node, node), 0,
+ "Incorrect iteration order");
+ }
+ }
+ ctx->last_node = node;
+ if (ctx->ncalls == ctx->ncalls_max) {
+ return node;
+ }
+ return NULL;
+}
+
+static int
+qsort_node_cmp(const void *ap, const void *bp) {
+ node_t *a = *(node_t **)ap;
+ node_t *b = *(node_t **)bp;
+ return node_cmp(a, b);
+}
+
+#define UPDATE_TEST_MAX 100
+static void
+check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) {
+ uint64_t specialness = 1;
+
+ bool empty;
+ bool real_empty = true;
+ node_t *first;
+ node_t *real_first = NULL;
+ node_t *last;
+ node_t *real_last = NULL;
+ for (int i = 0; i < nnodes; i++) {
+ if (nodes[i].specialness >= specialness) {
+ real_empty = false;
+ if (real_first == NULL
+ || node_cmp(&nodes[i], real_first) < 0) {
+ real_first = &nodes[i];
+ }
+ if (real_last == NULL
+ || node_cmp(&nodes[i], real_last) > 0) {
+ real_last = &nodes[i];
+ }
+ }
+ }
+
+ empty = tree_empty_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_b_eq(real_empty, empty, "");
+
+ first = tree_first_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(real_first, first, "");
+
+ last = tree_last_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(real_last, last, "");
+
+ for (int i = 0; i < nnodes; i++) {
+ node_t *next_filtered;
+ node_t *real_next_filtered = NULL;
+ node_t *prev_filtered;
+ node_t *real_prev_filtered = NULL;
+ for (int j = 0; j < nnodes; j++) {
+ if (nodes[j].specialness < specialness) {
+ continue;
+ }
+ if (node_cmp(&nodes[j], &nodes[i]) < 0
+ && (real_prev_filtered == NULL
+ || node_cmp(&nodes[j], real_prev_filtered) > 0)) {
+ real_prev_filtered = &nodes[j];
+ }
+ if (node_cmp(&nodes[j], &nodes[i]) > 0
+ && (real_next_filtered == NULL
+ || node_cmp(&nodes[j], real_next_filtered) < 0)) {
+ real_next_filtered = &nodes[j];
+ }
+ }
+ next_filtered = tree_next_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_next_filtered, next_filtered, "");
+
+ prev_filtered = tree_prev_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_prev_filtered, prev_filtered, "");
+
+ node_t *search_filtered;
+ node_t *real_search_filtered;
+ node_t *nsearch_filtered;
+ node_t *real_nsearch_filtered;
+ node_t *psearch_filtered;
+ node_t *real_psearch_filtered;
+
+ /*
+ * search, nsearch, psearch from a node before nodes[i] in the
+ * ordering.
+ */
+ node_t before;
+ before.magic = NODE_MAGIC;
+ before.key = nodes[i].key - 1;
+ before.allow_duplicates = false;
+ real_search_filtered = NULL;
+ search_filtered = tree_search_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = real_prev_filtered;
+ psearch_filtered = tree_psearch_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /* search, nsearch, psearch from nodes[i] */
+ real_search_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : NULL);
+ search_filtered = tree_search_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /*
+ * search, nsearch, psearch from a node equivalent to but
+ * distinct from nodes[i].
+ */
+ node_t equiv;
+ equiv.magic = NODE_MAGIC;
+ equiv.key = nodes[i].key;
+ equiv.allow_duplicates = true;
+ real_search_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : NULL);
+ search_filtered = tree_search_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /*
+ * search, nsearch, psearch from a node after nodes[i] in the
+ * ordering.
+ */
+ node_t after;
+ after.magic = NODE_MAGIC;
+ after.key = nodes[i].key + 1;
+ after.allow_duplicates = false;
+ real_search_filtered = NULL;
+ search_filtered = tree_search_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = real_next_filtered;
+ nsearch_filtered = tree_nsearch_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+ }
+
+ /* Filtered iteration test setup. */
+ int nspecial = 0;
+ node_t *sorted_nodes[UPDATE_TEST_MAX];
+ node_t *sorted_filtered_nodes[UPDATE_TEST_MAX];
+ for (int i = 0; i < nnodes; i++) {
+ sorted_nodes[i] = &nodes[i];
+ }
+ qsort(sorted_nodes, nnodes, sizeof(node_t *), &qsort_node_cmp);
+ for (int i = 0; i < nnodes; i++) {
+ sorted_nodes[i]->rank = i;
+ sorted_nodes[i]->filtered_rank = nspecial;
+ if (sorted_nodes[i]->specialness >= 1) {
+ sorted_filtered_nodes[nspecial] = sorted_nodes[i];
+ nspecial++;
+ }
+ }
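+	/*
+	 * At this point rank is each node's position in the full sorted order
+	 * and filtered_rank is the number of "special" nodes that sort
+	 * strictly before it; sorted_filtered_nodes holds the special nodes
+	 * in ascending order.
+	 */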
+
+ node_t *iter_result;
+
+ iter_ctx_t ctx;
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = INT_MAX;
+ ctx.forward = true;
+
+ /* Filtered forward iteration from the beginning. */
+ iter_result = tree_iter_filtered(tree, NULL, &tree_iterate_filtered_cb,
+ &ctx, &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial, ctx.ncalls, "");
+ /* Filtered forward iteration from a starting point. */
+ for (int i = 0; i < nnodes; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ iter_result = tree_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial - nodes[i].filtered_rank, ctx.ncalls, "");
+ }
+ /* Filtered forward iteration from the beginning, with stopping */
+ for (int i = 0; i < nspecial; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = i + 1;
+ iter_result = tree_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(sorted_filtered_nodes[i], iter_result, "");
+ expect_d_eq(ctx.ncalls, i + 1, "");
+ }
+ /* Filtered forward iteration from a starting point, with stopping. */
+ for (int i = 0; i < nnodes; i++) {
+ for (int j = 0; j < nspecial - nodes[i].filtered_rank; j++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = j + 1;
+ iter_result = tree_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx,
+ &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_d_eq(j + 1, ctx.ncalls, "");
+ expect_ptr_eq(sorted_filtered_nodes[
+ nodes[i].filtered_rank + j], iter_result, "");
+ }
+ }
+
+ /* Backwards iteration. */
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = INT_MAX;
+ ctx.forward = false;
+
+ /* Filtered backward iteration from the end. */
+ iter_result = tree_reverse_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial, ctx.ncalls, "");
+ /* Filtered backward iteration from a starting point. */
+ for (int i = 0; i < nnodes; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ iter_result = tree_reverse_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
+ expect_d_eq(nodes[i].filtered_rank + surplus_rank, ctx.ncalls,
+ "");
+ }
+ /* Filtered backward iteration from the end, with stopping */
+ for (int i = 0; i < nspecial; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = i + 1;
+ iter_result = tree_reverse_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(sorted_filtered_nodes[nspecial - i - 1],
+ iter_result, "");
+ expect_d_eq(ctx.ncalls, i + 1, "");
+ }
+ /* Filtered backward iteration from a starting point, with stopping. */
+ for (int i = 0; i < nnodes; i++) {
+ int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
+ for (int j = 0; j < nodes[i].filtered_rank + surplus_rank;
+ j++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = j + 1;
+ iter_result = tree_reverse_iter_filtered(tree,
+ &nodes[i], &tree_iterate_filtered_cb, &ctx,
+ &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_d_eq(j + 1, ctx.ncalls, "");
+ expect_ptr_eq(sorted_filtered_nodes[
+ nodes[i].filtered_rank - j - 1 + surplus_rank],
+ iter_result, "");
+ }
+ }
+}
+
+static void
+do_update_search_test(int nnodes, int ntrees, int nremovals,
+ int nupdates) {
+ node_t nodes[UPDATE_TEST_MAX];
+ assert(nnodes <= UPDATE_TEST_MAX);
+
+ sfmt_t *sfmt = init_gen_rand(12345);
+ for (int i = 0; i < ntrees; i++) {
+ tree_t tree;
+ tree_new(&tree);
+ for (int j = 0; j < nnodes; j++) {
+ nodes[j].magic = NODE_MAGIC;
+ /*
+ * In consistency checking, we increment or decrement a
+ * key and assume that the result is not a key in the
+ * tree. This isn't a *real* concern with 64-bit keys
+			 * and a good PRNG, but why not be correct anyway?
+ */
+ nodes[j].key = 2 * gen_rand64(sfmt);
+ nodes[j].specialness = 0;
+ nodes[j].mid_remove = false;
+ nodes[j].allow_duplicates = false;
+ nodes[j].summary_lchild = NULL;
+ nodes[j].summary_rchild = NULL;
+ nodes[j].summary_max_specialness = 0;
+ tree_insert(&tree, &nodes[j]);
+ }
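+		/*
+		 * Remove a random subset of the nodes (tracked via
+		 * mid_remove), then reinsert them below with fresh keys.
+		 */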
+ for (int j = 0; j < nremovals; j++) {
+ int victim = (int)gen_rand64_range(sfmt, nnodes);
+ if (!nodes[victim].mid_remove) {
+ tree_remove(&tree, &nodes[victim]);
+ nodes[victim].mid_remove = true;
+ }
+ }
+ for (int j = 0; j < nnodes; j++) {
+ if (nodes[j].mid_remove) {
+ nodes[j].mid_remove = false;
+ nodes[j].key = 2 * gen_rand64(sfmt);
+ tree_insert(&tree, &nodes[j]);
+ }
+ }
+ for (int j = 0; j < nupdates; j++) {
+ uint32_t ind = gen_rand32_range(sfmt, nnodes);
+ nodes[ind].specialness = 1 - nodes[ind].specialness;
+ tree_update_summaries(&tree, &nodes[ind]);
+ check_consistency(&tree, nodes, nnodes);
+ }
+ }
+}
+
+TEST_BEGIN(test_rb_update_search) {
+ summarize_always_returns_true = false;
+ do_update_search_test(2, 100, 3, 50);
+ do_update_search_test(5, 100, 3, 50);
+ do_update_search_test(12, 100, 5, 1000);
+ do_update_search_test(100, 1, 50, 500);
+}
+TEST_END
+
+typedef rb_tree(node_t) unsummarized_tree_t;
+rb_gen(static UNUSED, unsummarized_tree_, unsummarized_tree_t, node_t, link,
+ node_cmp);
+
+static node_t *
+unsummarized_tree_iterate_cb(unsummarized_tree_t *tree, node_t *node,
+ void *data) {
+ unsigned *i = (unsigned *)data;
+ (*i)++;
+ return NULL;
+}
+/*
+ * The unsummarized and summarized functionality is implemented via the same
+ * functions; we don't really need to do much more than test that we can exclude
+ * the filtered functionality without anything breaking.
+ */
+TEST_BEGIN(test_rb_unsummarized) {
+ unsummarized_tree_t tree;
+ unsummarized_tree_new(&tree);
+ unsigned nnodes = 0;
+ unsummarized_tree_iter(&tree, NULL, &unsummarized_tree_iterate_cb,
+ &nnodes);
+ expect_u_eq(0, nnodes, "");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_rb_empty,
+ test_rb_random,
+ test_rb_filter_simple,
+ test_rb_update_search,
+ test_rb_unsummarized);
+}
diff --git a/deps/jemalloc/test/unit/retained.c b/deps/jemalloc/test/unit/retained.c
new file mode 100644
index 0000000..aa9f684
--- /dev/null
+++ b/deps/jemalloc/test/unit/retained.c
@@ -0,0 +1,188 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/spin.h"
+
+static unsigned arena_ind;
+static size_t sz;
+static size_t esz;
+#define NEPOCHS 8
+#define PER_THD_NALLOCS 1
+static atomic_u_t epoch;
+static atomic_u_t nfinished;
+
+static unsigned
+do_arena_create(extent_hooks_t *h) {
+ unsigned new_arena_ind;
+ size_t ind_sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
+ (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
+ "Unexpected mallctl() failure");
+ return new_arena_ind;
+}
+
+static void
+do_arena_destroy(unsigned ind) {
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static void
+do_refresh(void) {
+ uint64_t refresh_epoch = 1;
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
+ sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
+}
+
+static size_t
+do_get_size_impl(const char *cmd, unsigned ind) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ size_t z = sizeof(size_t);
+
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ size_t size;
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
+
+ return size;
+}
+
+static size_t
+do_get_active(unsigned ind) {
+ return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
+}
+
+static size_t
+do_get_mapped(unsigned ind) {
+ return do_get_size_impl("stats.arenas.0.mapped", ind);
+}
+
+static void *
+thd_start(void *arg) {
+ for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
+ /* Busy-wait for next epoch. */
+ unsigned cur_epoch;
+ spin_t spinner = SPIN_INITIALIZER;
+ while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
+ next_epoch) {
+ spin_adaptive(&spinner);
+ }
+ expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
+
+ /*
+ * Allocate. The main thread will reset the arena, so there's
+ * no need to deallocate.
+ */
+ for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
+ void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE
+ );
+ expect_ptr_not_null(p,
+ "Unexpected mallocx() failure\n");
+ }
+
+ /* Let the main thread know we've finished this iteration. */
+ atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_retained) {
+ test_skip_if(!config_stats);
+ test_skip_if(opt_hpa);
+
+ arena_ind = do_arena_create(NULL);
+ sz = nallocx(HUGEPAGE, 0);
+ size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
+ esz = sz + sz_large_pad + guard_sz;
+
+ atomic_store_u(&epoch, 0, ATOMIC_RELAXED);
+
+ unsigned nthreads = ncpus * 2;
+ if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
+ nthreads = 16; /* 32-bit platform could run out of vaddr. */
+ }
+ VARIABLE_ARRAY(thd_t, threads, nthreads);
+ for (unsigned i = 0; i < nthreads; i++) {
+ thd_create(&threads[i], thd_start, NULL);
+ }
+
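+	/*
+	 * Drive the workers one epoch at a time: publish the next epoch,
+	 * wait until every thread has reported back via nfinished, then
+	 * check the arena stats and reset the arena for the next round.
+	 */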
+ for (unsigned e = 1; e < NEPOCHS; e++) {
+ atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
+ atomic_store_u(&epoch, e, ATOMIC_RELEASE);
+
+ /* Wait for threads to finish allocating. */
+ spin_t spinner = SPIN_INITIALIZER;
+ while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
+ spin_adaptive(&spinner);
+ }
+
+ /*
+ * Assert that retained is no more than the sum of size classes
+ * that should have been used to satisfy the worker threads'
+ * requests, discounting per growth fragmentation.
+ */
+ do_refresh();
+
+ size_t allocated = (esz - guard_sz) * nthreads *
+ PER_THD_NALLOCS;
+ size_t active = do_get_active(arena_ind);
+ expect_zu_le(allocated, active, "Unexpected active memory");
+ size_t mapped = do_get_mapped(arena_ind);
+ expect_zu_le(active, mapped, "Unexpected mapped memory");
+
+ arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
+ size_t usable = 0;
+ size_t fragmented = 0;
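+		/*
+		 * Walk the page size classes from HUGEPAGE up to the arena's
+		 * current exp_grow position; for each, count how much of it is
+		 * usable for esz-sized allocations and how much is lost to
+		 * fragmentation, and check that the usable retained space stays
+		 * below what the workers actually allocated.
+		 */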
+ for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
+ arena->pa_shard.pac.exp_grow.next; pind++) {
+ size_t psz = sz_pind2sz(pind);
+ size_t psz_fragmented = psz % esz;
+ size_t psz_usable = psz - psz_fragmented;
+ /*
+ * Only consider size classes that wouldn't be skipped.
+ */
+ if (psz_usable > 0) {
+ expect_zu_lt(usable, allocated,
+ "Excessive retained memory "
+ "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
+ allocated);
+ fragmented += psz_fragmented;
+ usable += psz_usable;
+ }
+ }
+
+ /*
+ * Clean up arena. Destroying and recreating the arena
+		 * is simpler than specifying extent hooks that deallocate
+ * (rather than retaining) during reset.
+ */
+ do_arena_destroy(arena_ind);
+ expect_u_eq(do_arena_create(NULL), arena_ind,
+ "Unexpected arena index");
+ }
+
+ for (unsigned i = 0; i < nthreads; i++) {
+ thd_join(threads[i], NULL);
+ }
+
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_retained);
+}
diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c
new file mode 100644
index 0000000..4101b72
--- /dev/null
+++ b/deps/jemalloc/test/unit/rtree.c
@@ -0,0 +1,289 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/rtree.h"
+
+#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
+
+/* Potentially too large to safely place on the stack. */
+rtree_t test_rtree;
+
+TEST_BEGIN(test_rtree_read_empty) {
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+ rtree_contents_t contents;
+ expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE,
+ &contents), "rtree_read_independent() should fail on empty rtree.");
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+#undef NTHREADS
+#undef NITERS
+#undef SEED
+
+static edata_t *
+alloc_edata(void) {
+ void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT));
+ assert_ptr_not_null(ret, "Unexpected mallocx() failure");
+
+ return ret;
+}
+
+TEST_BEGIN(test_rtree_extrema) {
+ edata_t *edata_a, *edata_b;
+ edata_a = alloc_edata();
+ edata_b = alloc_edata();
+ edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
+ false, sz_size2index(SC_LARGE_MINCLASS), 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
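+	/*
+	 * Write and read back entries near the two extremes of the key space:
+	 * a low key (PAGE) and the highest possible key (~0).
+	 */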
+ rtree_contents_t contents_a;
+ contents_a.edata = edata_a;
+ contents_a.metadata.szind = edata_szind_get(edata_a);
+ contents_a.metadata.slab = edata_slab_get(edata_a);
+ contents_a.metadata.is_head = edata_is_head_get(edata_a);
+ contents_a.metadata.state = edata_state_get(edata_a);
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
+ "Unexpected rtree_write() failure");
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
+ "Unexpected rtree_write() failure");
+ rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx,
+ PAGE);
+ expect_true(contents_a.edata == read_contents_a.edata
+ && contents_a.metadata.szind == read_contents_a.metadata.szind
+ && contents_a.metadata.slab == read_contents_a.metadata.slab
+ && contents_a.metadata.is_head == read_contents_a.metadata.is_head
+ && contents_a.metadata.state == read_contents_a.metadata.state,
+ "rtree_read() should return previously set value");
+
+ rtree_contents_t contents_b;
+ contents_b.edata = edata_b;
+ contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b);
+ contents_b.metadata.slab = edata_slab_get(edata_b);
+ contents_b.metadata.is_head = edata_is_head_get(edata_b);
+ contents_b.metadata.state = edata_state_get(edata_b);
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
+ contents_b), "Unexpected rtree_write() failure");
+ rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
+ ~((uintptr_t)0));
+ assert_true(contents_b.edata == read_contents_b.edata
+ && contents_b.metadata.szind == read_contents_b.metadata.szind
+ && contents_b.metadata.slab == read_contents_b.metadata.slab
+ && contents_b.metadata.is_head == read_contents_b.metadata.is_head
+ && contents_b.metadata.state == read_contents_b.metadata.state,
+ "rtree_read() should return previously set value");
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_bits) {
+ tsdn_t *tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ uintptr_t keys[] = {PAGE, PAGE + 1,
+ PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
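+	/*
+	 * All three keys fall within the same page, so they should map to the
+	 * same rtree entry; the loop below checks that the low-order key bits
+	 * are ignored.
+	 */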
+ edata_t *edata_c = alloc_edata();
+ edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
+ for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
+ rtree_contents_t contents;
+ contents.edata = edata_c;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = extent_state_active;
+
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
+ contents), "Unexpected rtree_write() failure");
+ for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[j]).edata, edata_c,
+ "rtree_edata_read() should return previously set "
+ "value and ignore insignificant key bits; i=%u, "
+ "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
+ j, keys[i], keys[j]);
+ }
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ (((uintptr_t)2) << LG_PAGE)).edata,
+ "Only leftmost rtree leaf should be set; i=%u", i);
+ rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
+ }
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_random) {
+#define NSET 16
+#define SEED 42
+ sfmt_t *sfmt = init_gen_rand(SEED);
+ tsdn_t *tsdn = tsdn_fetch();
+
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ uintptr_t keys[NSET];
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+
+ edata_t *edata_d = alloc_edata();
+ edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
+ for (unsigned i = 0; i < NSET; i++) {
+ keys[i] = (uintptr_t)gen_rand64(sfmt);
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
+ &rtree_ctx, keys[i], false, true);
+ expect_ptr_not_null(elm,
+ "Unexpected rtree_leaf_elm_lookup() failure");
+ rtree_contents_t contents;
+ contents.edata = edata_d;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = edata_state_get(edata_d);
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata, edata_d,
+ "rtree_edata_read() should return previously set value");
+ }
+ for (unsigned i = 0; i < NSET; i++) {
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata, edata_d,
+ "rtree_edata_read() should return previously set value, "
+ "i=%u", i);
+ }
+
+ for (unsigned i = 0; i < NSET; i++) {
+ rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata,
+		    "rtree_edata_read() should return NULL after clearing");
+ }
+ for (unsigned i = 0; i < NSET; i++) {
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata,
+		    "rtree_edata_read() should return NULL after clearing");
+ }
+
+ base_delete(tsdn, base);
+ fini_gen_rand(sfmt);
+#undef NSET
+#undef SEED
+}
+TEST_END
+
+static void
+test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start,
+ uintptr_t end) {
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+
+ edata_t *edata_e = alloc_edata();
+ edata_init(edata_e, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ rtree_contents_t contents;
+ contents.edata = edata_e;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = extent_state_active;
+
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start,
+ contents), "Unexpected rtree_write() failure");
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end,
+ contents), "Unexpected rtree_write() failure");
+
+ rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents);
+ for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ start + (i << LG_PAGE)).edata, edata_e,
+ "rtree_edata_read() should return previously set value");
+ }
+ rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end);
+ rtree_leaf_elm_t *elm;
+ for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
+ elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx,
+ start + (i << LG_PAGE), false, false);
+ expect_ptr_not_null(elm, "Should have been initialized.");
+ expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm,
+ false).edata, "Should have been cleared.");
+ }
+}
+
+TEST_BEGIN(test_rtree_range) {
+ tsdn_t *tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ rtree_t *rtree = &test_rtree;
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
+ /* Not crossing rtree node boundary first. */
+ uintptr_t start = ZU(1) << rtree_leaf_maskbits();
+ uintptr_t end = start + (ZU(100) << LG_PAGE);
+ test_rtree_range_write(tsdn, rtree, start, end);
+
+ /* Crossing rtree node boundary. */
+ start = (ZU(1) << rtree_leaf_maskbits()) - (ZU(10) << LG_PAGE);
+ end = start + (ZU(100) << LG_PAGE);
+ assert_ptr_ne((void *)rtree_leafkey(start), (void *)rtree_leafkey(end),
+ "The range should span across two rtree nodes");
+ test_rtree_range_write(tsdn, rtree, start, end);
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_rtree_read_empty,
+ test_rtree_extrema,
+ test_rtree_bits,
+ test_rtree_random,
+ test_rtree_range);
+}
diff --git a/deps/jemalloc/test/unit/safety_check.c b/deps/jemalloc/test/unit/safety_check.c
new file mode 100644
index 0000000..8472667
--- /dev/null
+++ b/deps/jemalloc/test/unit/safety_check.c
@@ -0,0 +1,163 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+/*
+ * Note that we get called through safety_check.sh, which turns on sampling for
+ * everything.
+ */
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+static void
+buffer_overflow_write(char *ptr, size_t size) {
+ /* Avoid overflow warnings. */
+ volatile size_t idx = size;
+ ptr[idx] = 0;
+}
+
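+/*
+ * Each overflow test below follows the same pattern: allocate a 128-byte
+ * object, write one byte past its end, then hand the object back through a
+ * particular deallocation or reallocation path; with sampling enabled (see
+ * safety_check.sh), the redzone check is expected to call the abort hook.
+ */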
+TEST_BEGIN(test_malloc_free_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ buffer_overflow_write(ptr, 128);
+ free(ptr);
+ safety_check_set_abort(NULL);
+
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+TEST_BEGIN(test_mallocx_dallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = mallocx(128, 0);
+ buffer_overflow_write(ptr, 128);
+ dallocx(ptr, 0);
+ safety_check_set_abort(NULL);
+
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_sdallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ buffer_overflow_write(ptr, 128);
+ sdallocx(ptr, 128, 0);
+ safety_check_set_abort(NULL);
+
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+TEST_BEGIN(test_realloc_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ buffer_overflow_write(ptr, 128);
+ ptr = realloc(ptr, 129);
+ safety_check_set_abort(NULL);
+ free(ptr);
+
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+TEST_BEGIN(test_rallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ buffer_overflow_write(ptr, 128);
+ ptr = rallocx(ptr, 129, 0);
+ safety_check_set_abort(NULL);
+ free(ptr);
+
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+TEST_BEGIN(test_xallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ buffer_overflow_write(ptr, 128);
+ size_t result = xallocx(ptr, 129, 0, 0);
+ expect_zu_eq(result, 128, "");
+ free(ptr);
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+ safety_check_set_abort(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_realloc_no_overflow) {
+ char* ptr = malloc(128);
+ ptr = realloc(ptr, 256);
+ ptr[128] = 0;
+ ptr[255] = 0;
+ free(ptr);
+
+ ptr = malloc(128);
+ ptr = realloc(ptr, 64);
+ ptr[63] = 0;
+ ptr[0] = 0;
+ free(ptr);
+}
+TEST_END
+
+TEST_BEGIN(test_rallocx_no_overflow) {
+ char* ptr = malloc(128);
+ ptr = rallocx(ptr, 256, 0);
+ ptr[128] = 0;
+ ptr[255] = 0;
+ free(ptr);
+
+ ptr = malloc(128);
+ ptr = rallocx(ptr, 64, 0);
+ ptr[63] = 0;
+ ptr[0] = 0;
+ free(ptr);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_malloc_free_overflow,
+ test_mallocx_dallocx_overflow,
+ test_malloc_sdallocx_overflow,
+ test_realloc_overflow,
+ test_rallocx_overflow,
+ test_xallocx_overflow,
+ test_realloc_no_overflow,
+ test_rallocx_no_overflow);
+}
diff --git a/deps/jemalloc/test/unit/safety_check.sh b/deps/jemalloc/test/unit/safety_check.sh
new file mode 100644
index 0000000..485f9bf
--- /dev/null
+++ b/deps/jemalloc/test/unit/safety_check.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/san.c b/deps/jemalloc/test/unit/san.c
new file mode 100644
index 0000000..5b98f52
--- /dev/null
+++ b/deps/jemalloc/test/unit/san.c
@@ -0,0 +1,207 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/san.h"
+
+static void
+verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
+ expect_true(extent_is_guarded(tsdn, ptr),
+ "All extents should be guarded.");
+}
+
+#define MAX_SMALL_ALLOCATIONS 4096
+void *small_alloc[MAX_SMALL_ALLOCATIONS];
+
+/*
+ * This test allocates page-sized slabs and checks that every two slabs have
+ * at least one page in between them. That page is supposed to be the guard
+ * page.
+ */
+TEST_BEGIN(test_guarded_small) {
+ test_skip_if(opt_prof);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ unsigned npages = 16, pages_found = 0, ends_found = 0;
+ VARIABLE_ARRAY(uintptr_t, pages, npages);
+
+ /* Allocate to get sanitized pointers. */
+ size_t slab_sz = PAGE;
+ size_t sz = slab_sz / 8;
+ unsigned n_alloc = 0;
+ while (n_alloc < MAX_SMALL_ALLOCATIONS) {
+ void *ptr = malloc(sz);
+ expect_ptr_not_null(ptr, "Unexpected malloc() failure");
+ small_alloc[n_alloc] = ptr;
+ verify_extent_guarded(tsdn, ptr);
+ if ((uintptr_t)ptr % PAGE == 0) {
+ assert_u_lt(pages_found, npages,
+ "Unexpectedly large number of page aligned allocs");
+ pages[pages_found++] = (uintptr_t)ptr;
+ }
+ if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
+ ends_found++;
+ }
+ n_alloc++;
+ if (pages_found == npages && ends_found == npages) {
+ break;
+ }
+ }
+	/* Should have found the ptrs being checked for overflow and underflow. */
+	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
+	expect_u_eq(ends_found, npages, "Could not find the expected pages.");
+
+	/* Verify the pages are not contiguous, i.e. separated by guards. */
+ for (unsigned i = 0; i < npages - 1; i++) {
+ for (unsigned j = i + 1; j < npages; j++) {
+ uintptr_t ptr_diff = pages[i] > pages[j] ?
+ pages[i] - pages[j] : pages[j] - pages[i];
+ expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
+			    "There should be at least one page between "
+			    "guarded slabs");
+ }
+ }
+
+	for (unsigned i = 0; i < n_alloc; i++) {
+ free(small_alloc[i]);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_guarded_large) {
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ unsigned nlarge = 32;
+ VARIABLE_ARRAY(uintptr_t, large, nlarge);
+
+ /* Allocate to get sanitized pointers. */
+ size_t large_sz = SC_LARGE_MINCLASS;
+ for (unsigned i = 0; i < nlarge; i++) {
+ void *ptr = malloc(large_sz);
+ verify_extent_guarded(tsdn, ptr);
+ expect_ptr_not_null(ptr, "Unexpected malloc() failure");
+ large[i] = (uintptr_t)ptr;
+ }
+
+	/* Verify the pages are not contiguous, i.e. separated by guards. */
+ for (unsigned i = 0; i < nlarge; i++) {
+ for (unsigned j = i + 1; j < nlarge; j++) {
+ uintptr_t ptr_diff = large[i] > large[j] ?
+ large[i] - large[j] : large[j] - large[i];
+ expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
+			    "There should be at least two pages between "
+			    "guarded large allocations");
+ }
+ }
+
+ for (unsigned i = 0; i < nlarge; i++) {
+ free((void *)large[i]);
+ }
+}
+TEST_END
+
+static void
+verify_pdirty(unsigned arena_ind, uint64_t expected) {
+ uint64_t pdirty = get_arena_pdirty(arena_ind);
+ expect_u64_eq(pdirty, expected / PAGE,
+ "Unexpected dirty page amount.");
+}
+
+static void
+verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
+ uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
+ expect_u64_eq(pmuzzy, expected / PAGE,
+ "Unexpected muzzy page amount.");
+}
+
+TEST_BEGIN(test_guarded_decay) {
+ unsigned arena_ind = do_arena_create(-1, -1);
+ do_decay(arena_ind);
+ do_purge(arena_ind);
+
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+	/* Verify that guarded extents are tracked as dirty. */
+ size_t sz1 = PAGE, sz2 = PAGE * 2;
+ /* W/o maps_coalesce, guarded extents are unguarded eagerly. */
+ size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
+ generate_dirty(arena_ind, sz1);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ /* Should reuse the first extent. */
+ generate_dirty(arena_ind, sz1);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ /* Should not reuse; expect new dirty pages. */
+ generate_dirty(arena_ind, sz2);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Should reuse dirty extents for the two mallocx. */
+ void *p1 = do_mallocx(sz1, flags);
+ verify_extent_guarded(tsdn, p1);
+ verify_pdirty(arena_ind, sz2 + add_guard_size);
+
+ void *p2 = do_mallocx(sz2, flags);
+ verify_extent_guarded(tsdn, p2);
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+ dallocx(p1, flags);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ dallocx(p2, flags);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ do_purge(arena_ind);
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+ if (config_stats) {
+ expect_u64_eq(get_arena_npurge(arena_ind), 1,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_dirty_purged(arena_ind),
+ (sz1 + sz2 + 2 * add_guard_size) / PAGE,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
+ "Expected purging to occur");
+ }
+
+ if (opt_retain) {
+ /*
+		 * With retain, guarded extents are not mergeable and will be
+ * cached in ecache_retained. They should be reused.
+ */
+ void *new_p1 = do_mallocx(sz1, flags);
+ verify_extent_guarded(tsdn, p1);
+ expect_ptr_eq(p1, new_p1, "Expect to reuse p1");
+
+ void *new_p2 = do_mallocx(sz2, flags);
+ verify_extent_guarded(tsdn, p2);
+ expect_ptr_eq(p2, new_p2, "Expect to reuse p2");
+
+ dallocx(new_p1, flags);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ dallocx(new_p2, flags);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+ }
+
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_guarded_small,
+ test_guarded_large,
+ test_guarded_decay);
+}
diff --git a/deps/jemalloc/test/unit/san.sh b/deps/jemalloc/test/unit/san.sh
new file mode 100644
index 0000000..933b4a4
--- /dev/null
+++ b/deps/jemalloc/test/unit/san.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
diff --git a/deps/jemalloc/test/unit/san_bump.c b/deps/jemalloc/test/unit/san_bump.c
new file mode 100644
index 0000000..cafa37f
--- /dev/null
+++ b/deps/jemalloc/test/unit/san_bump.c
@@ -0,0 +1,111 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+
+#include "jemalloc/internal/arena_structs.h"
+#include "jemalloc/internal/san_bump.h"
+
+TEST_BEGIN(test_san_bump_alloc) {
+ test_skip_if(!maps_coalesce || !opt_retain);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ san_bump_alloc_t sba;
+ san_bump_alloc_init(&sba);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
+
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ pac_t *pac = &arena->pa_shard.pac;
+
+ size_t alloc_size = PAGE * 16;
+ size_t alloc_n = alloc_size / sizeof(unsigned);
+ edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size, /* zero */ false);
+
+ expect_ptr_not_null(edata, "Failed to allocate edata");
+ expect_u_eq(edata_arena_ind_get(edata), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata), alloc_size,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata), "Edata is not committed");
+
+ void *ptr = edata_addr_get(edata);
+ expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
+ /* Test that memory is allocated; no guard pages are misplaced */
+ for (unsigned i = 0; i < alloc_n; ++i) {
+ ((unsigned *)ptr)[i] = 1;
+ }
+
+ size_t alloc_size2 = PAGE * 28;
+	size_t alloc_n2 = alloc_size2 / sizeof(unsigned);
+ edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size2, /* zero */ true);
+
+ expect_ptr_not_null(edata2, "Failed to allocate edata");
+ expect_u_eq(edata_arena_ind_get(edata2), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata2), alloc_size2,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata2),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata2), "Edata is not committed");
+
+ void *ptr2 = edata_addr_get(edata2);
+	expect_ptr_not_null(ptr2, "Edata was assigned an invalid address");
+
+ uintptr_t ptrdiff = ptr2 > ptr ? (uintptr_t)ptr2 - (uintptr_t)ptr
+ : (uintptr_t)ptr - (uintptr_t)ptr2;
+ size_t between_allocs = (size_t)ptrdiff - alloc_size;
+
+ expect_zu_ge(between_allocs, PAGE,
+ "Guard page between allocs is missing");
+
+ for (unsigned i = 0; i < alloc_n2; ++i) {
+ expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_alloc_size) {
+ test_skip_if(!maps_coalesce || !opt_retain);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ san_bump_alloc_t sba;
+ san_bump_alloc_init(&sba);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
+
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ pac_t *pac = &arena->pa_shard.pac;
+
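+	/*
+	 * Request more than SBA_RETAINED_ALLOC_SIZE, which should force the
+	 * bump allocator down its oversized-allocation path rather than
+	 * carving the allocation out of a default-sized retained extent.
+	 */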
+ size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2;
+ edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size, /* zero */ false);
+ expect_u_eq(edata_arena_ind_get(edata), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata), alloc_size,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata), "Edata is not committed");
+
+ void *ptr = edata_addr_get(edata);
+ expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
+ /* Test that memory is allocated; no guard pages are misplaced */
+ for (unsigned i = 0; i < alloc_size / PAGE; ++i) {
+ *((char *)ptr + PAGE * i) = 1;
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_san_bump_alloc,
+ test_large_alloc_size);
+}
diff --git a/deps/jemalloc/test/unit/sc.c b/deps/jemalloc/test/unit/sc.c
new file mode 100644
index 0000000..d207481
--- /dev/null
+++ b/deps/jemalloc/test/unit/sc.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_update_slab_size) {
+ sc_data_t data;
+ memset(&data, 0, sizeof(data));
+ sc_data_init(&data);
+ sc_t *tiny = &data.sc[0];
+ size_t tiny_size = (ZU(1) << tiny->lg_base)
+ + (ZU(tiny->ndelta) << tiny->lg_delta);
+ size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
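+	/*
+	 * pgs_too_big is one page more than a slab of the smallest size class
+	 * could ever need (a slab holds at most BITMAP_MAXBITS regions), so
+	 * the hint below must be capped rather than taken as-is.
+	 */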
+ sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
+ expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
+
+ sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
+ for (int i = 0; i < data.nbins; i++) {
+ sc_t *sc = &data.sc[i];
+ size_t reg_size = (ZU(1) << sc->lg_base)
+ + (ZU(sc->ndelta) << sc->lg_delta);
+ if (reg_size <= PAGE) {
+ expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
+ } else {
+ expect_d_gt(sc->pgs, 1,
+ "Allowed invalid page size hint");
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_update_slab_size);
+}
diff --git a/deps/jemalloc/test/unit/sec.c b/deps/jemalloc/test/unit/sec.c
new file mode 100644
index 0000000..f3ec403
--- /dev/null
+++ b/deps/jemalloc/test/unit/sec.c
@@ -0,0 +1,634 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/sec.h"
+
+typedef struct pai_test_allocator_s pai_test_allocator_t;
+struct pai_test_allocator_s {
+ pai_t pai;
+ bool alloc_fail;
+ size_t alloc_count;
+ size_t alloc_batch_count;
+ size_t dalloc_count;
+ size_t dalloc_batch_count;
+ /*
+ * We use a simple bump allocator as the implementation. This isn't
+ * *really* correct, since we may allow expansion into a subsequent
+ * allocation, but it's not like the SEC is really examining the
+ * pointers it gets back; this is mostly just helpful for debugging.
+ */
+ uintptr_t next_ptr;
+ size_t expand_count;
+ bool expand_return_value;
+ size_t shrink_count;
+ bool shrink_return_value;
+};
+
+static void
+test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
+ size_t max_bytes) {
+ sec_opts_t opts;
+ opts.nshards = 1;
+ opts.max_alloc = max_alloc;
+ opts.max_bytes = max_bytes;
+ /*
+ * Just choose reasonable defaults for these; most tests don't care so
+ * long as they're something reasonable.
+ */
+ opts.bytes_after_flush = max_bytes / 2;
+ opts.batch_fill_extra = 4;
+
+ /*
+ * We end up leaking this base, but that's fine; this test is
+ * short-running, and SECs are arena-scoped in reality.
+ */
+ base_t *base = base_new(TSDN_NULL, /* ind */ 123,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+
+ bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
+ assert_false(err, "Unexpected initialization failure");
+ assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
+}
+
+static inline edata_t *
+pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated) {
+ assert(!guarded);
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ if (ta->alloc_fail) {
+ return NULL;
+ }
+ edata_t *edata = malloc(sizeof(edata_t));
+ assert_ptr_not_null(edata, "");
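+	/* Round the bump pointer up to the requested alignment. */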
+ ta->next_ptr += alignment - 1;
+ edata_init(edata, /* arena_ind */ 0,
+ (void *)(ta->next_ptr & ~(alignment - 1)), size,
+ /* slab */ false,
+ /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
+	    /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
+ ta->next_ptr += size;
+ ta->alloc_count++;
+ return edata;
+}
+
+static inline size_t
+pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ if (ta->alloc_fail) {
+ return 0;
+ }
+ for (size_t i = 0; i < nallocs; i++) {
+ edata_t *edata = malloc(sizeof(edata_t));
+ assert_ptr_not_null(edata, "");
+ edata_init(edata, /* arena_ind */ 0,
+ (void *)ta->next_ptr, size,
+ /* slab */ false, /* szind */ 0, /* sn */ 1,
+		    extent_state_active, /* zero */ false, /* committed */ true,
+ /* ranged */ false, EXTENT_NOT_HEAD);
+ ta->next_ptr += size;
+ ta->alloc_batch_count++;
+ edata_list_active_append(results, edata);
+ }
+ return nallocs;
+}
+
+static bool
+pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->expand_count++;
+ return ta->expand_return_value;
+}
+
+static bool
+pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->shrink_count++;
+ return ta->shrink_return_value;
+}
+
+static void
+pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->dalloc_count++;
+ free(edata);
+}
+
+static void
+pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+
+ edata_t *edata;
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ edata_list_active_remove(list, edata);
+ ta->dalloc_batch_count++;
+ free(edata);
+ }
+}
+
+static inline void
+pai_test_allocator_init(pai_test_allocator_t *ta) {
+ ta->alloc_fail = false;
+ ta->alloc_count = 0;
+ ta->alloc_batch_count = 0;
+ ta->dalloc_count = 0;
+ ta->dalloc_batch_count = 0;
+ /* Just don't start the edata at 0. */
+ ta->next_ptr = 10 * PAGE;
+ ta->expand_count = 0;
+ ta->expand_return_value = false;
+ ta->shrink_count = 0;
+ ta->shrink_return_value = false;
+ ta->pai.alloc = &pai_test_allocator_alloc;
+ ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
+ ta->pai.expand = &pai_test_allocator_expand;
+ ta->pai.shrink = &pai_test_allocator_shrink;
+ ta->pai.dalloc = &pai_test_allocator_dalloc;
+ ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
+}
+
+TEST_BEGIN(test_reuse) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /*
+ * We can't use the "real" tsd, since we malloc within the test
+ * allocator hooks; we'd get lock inversion crashes. Eventually, we
+ * should have a way to mock tsds, but for now just don't do any
+ * lock-order checking.
+ */
+ tsdn_t *tsdn = TSDN_NULL;
+ /*
+ * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
+ * able to get to 33 pages in the cache before triggering a flush. We
+	 * set the flush limit to twice this amount, to avoid accidentally
+ * triggering a flush caused by the batch-allocation down the cache fill
+ * pathway disrupting ordering.
+ */
+ enum { NALLOCS = 11 };
+ edata_t *one_page[NALLOCS];
+ edata_t *two_page[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
+ /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
+ for (int i = 0; i < NALLOCS; i++) {
+ one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
+ two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
+ }
+ expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+ expect_zu_le(2 * NALLOCS, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Free in a different order than we allocated, to make sure free-list
+ * separation works correctly.
+ */
+ for (int i = NALLOCS - 1; i >= 0; i--) {
+ pai_dalloc(tsdn, &sec.pai, one_page[i],
+ &deferred_work_generated);
+ }
+ for (int i = NALLOCS - 1; i >= 0; i--) {
+ pai_dalloc(tsdn, &sec.pai, two_page[i],
+ &deferred_work_generated);
+ }
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Check that the n'th most recent deallocated extent is returned for
+ * the n'th alloc request of a given size.
+ */
+ for (int i = 0; i < NALLOCS; i++) {
+ edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_eq(one_page[i], alloc1,
+ "Got unexpected allocation");
+ expect_ptr_eq(two_page[i], alloc2,
+ "Got unexpected allocation");
+ }
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+}
+TEST_END
+
+
+TEST_BEGIN(test_auto_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+ /*
+	 * 10 allocs of 1-PAGE objects mean that we should be able to get to
+	 * 10 pages in the cache before triggering a flush. The choice of
+	 * NALLOCS here is chosen to match the batch allocation default
+	 * (4 extra + 1 == 5; so 10 allocations leave the cache exactly
+	 * empty, even in the presence of batch allocation on fill).
+ * Eventually, once our allocation batching strategies become smarter,
+ * this should change.
+ */
+ enum { NALLOCS = 10 };
+ edata_t *extra_alloc;
+ edata_t *allocs[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ NALLOCS * PAGE);
+ for (int i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+ extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+ expect_zu_le(NALLOCS + 1, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /* Free until the SEC is full, but should not have flushed yet. */
+ for (int i = 0; i < NALLOCS; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
+ expect_zu_le(NALLOCS + 1, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Free the extra allocation; this should trigger a flush. The internal
+ * flushing logic is allowed to get complicated; for now, we rely on our
+ * whitebox knowledge of the fact that the SEC flushes bins in their
+ * entirety when it decides to do so, and it has only one bin active
+ * right now.
+ */
+ pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+}
+TEST_END
+
+/*
+ * A disable and a flush are *almost* equivalent; the only difference is what
+ * happens afterwards; disabling disallows all future caching as well.
+ */
+static void
+do_disable_flush_test(bool is_disable) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum { NALLOCS = 11 };
+ edata_t *allocs[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ NALLOCS * PAGE);
+ for (int i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+	/* Free all but the last alloc. */
+ for (int i = 0; i < NALLOCS - 1; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+
+ expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+
+ if (is_disable) {
+ sec_disable(tsdn, &sec);
+ } else {
+ sec_flush(tsdn, &sec);
+ }
+
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+ size_t old_dalloc_batch_count = ta.dalloc_batch_count;
+
+ /*
+ * If we free into a disabled SEC, it should forward to the fallback.
+ * Otherwise, the SEC should accept the allocation.
+ */
+ pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
+ &deferred_work_generated);
+
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+}
+
+TEST_BEGIN(test_disable) {
+ do_disable_flush_test(/* is_disable */ true);
+}
+TEST_END
+
+TEST_BEGIN(test_flush) {
+ do_disable_flush_test(/* is_disable */ false);
+}
+TEST_END
+
+TEST_BEGIN(test_max_alloc_respected) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ size_t max_alloc = 2 * PAGE;
+ size_t attempted_alloc = 3 * PAGE;
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
+ /* max_bytes */ 1000 * PAGE);
+
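+	/*
+	 * Every request below is larger than max_alloc, so the SEC should
+	 * never cache it; each alloc and dalloc must go straight through to
+	 * the fallback, which is what the alloc/dalloc counters verify.
+	 */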
+ for (size_t i = 0; i < 100; i++) {
+ expect_zu_eq(i, ta.alloc_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(i, ta.dalloc_count,
+ "Incorrect number of deallocations");
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
+ PAGE, /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_work_generated);
+ expect_ptr_not_null(edata, "Unexpected alloc failure");
+ expect_zu_eq(i + 1, ta.alloc_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(i, ta.dalloc_count,
+ "Incorrect number of deallocations");
+ pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_expand_shrink_delegate) {
+ /*
+ * Expand and shrink shouldn't affect sec state; they should just
+ * delegate to the fallback PAI.
+ */
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
+ /* max_bytes */ 1000 * PAGE);
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_ptr_not_null(edata, "Unexpected alloc failure");
+
+ bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
+ /* zero */ false, &deferred_work_generated);
+ expect_false(err, "Unexpected expand failure");
+ expect_zu_eq(1, ta.expand_count, "");
+ ta.expand_return_value = true;
+ err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
+ /* zero */ false, &deferred_work_generated);
+ expect_true(err, "Unexpected expand success");
+ expect_zu_eq(2, ta.expand_count, "");
+
+ err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
+ &deferred_work_generated);
+ expect_false(err, "Unexpected shrink failure");
+ expect_zu_eq(1, ta.shrink_count, "");
+ ta.shrink_return_value = true;
+ err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
+ &deferred_work_generated);
+ expect_true(err, "Unexpected shrink success");
+ expect_zu_eq(2, ta.shrink_count, "");
+}
+TEST_END
+
+TEST_BEGIN(test_nshards_0) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+ base_t *base = base_new(TSDN_NULL, /* ind */ 123,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+
+ sec_opts_t opts = SEC_OPTS_DEFAULT;
+ opts.nshards = 0;
+ sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
+
+ bool deferred_work_generated = false;
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
+
+ /* Both operations should have gone directly to the fallback. */
+ expect_zu_eq(1, ta.alloc_count, "");
+ expect_zu_eq(1, ta.dalloc_count, "");
+}
+TEST_END
+
+static void
+expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
+ sec_stats_t stats;
+ /*
+ * Check that the stats merging accumulates rather than overwrites by
+ * putting some (made up) data there to begin with.
+ */
+ stats.bytes = 123;
+ sec_stats_merge(tsdn, sec, &stats);
+ assert_zu_le(npages * PAGE + 123, stats.bytes, "");
+}
+
+TEST_BEGIN(test_stats_simple) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ NITERS = 100,
+ FLUSH_PAGES = 20,
+ };
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ edata_t *allocs[FLUSH_PAGES];
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, 0);
+ }
+
+ /* Increase and decrease, without flushing. */
+ for (size_t i = 0; i < NITERS; i++) {
+ for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[j],
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, j + 1);
+ }
+ for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
+ allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_auto_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ FLUSH_PAGES = 10,
+ };
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ edata_t *extra_alloc0;
+ edata_t *extra_alloc1;
+ edata_t *allocs[2 * FLUSH_PAGES];
+
+ bool deferred_work_generated = false;
+
+ extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+
+ for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ }
+
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
+ pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);
+
+ /* Flush the remaining pages; stats should still work. */
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
+ &deferred_work_generated);
+ }
+
+ pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);
+
+ expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
+ - ta.dalloc_count - ta.dalloc_batch_count);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_manual_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ FLUSH_PAGES = 10,
+ };
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ bool deferred_work_generated = false;
+ edata_t *allocs[FLUSH_PAGES];
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, 0);
+ }
+
+ /* Dalloc the first half of the allocations. */
+ for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, i + 1);
+ }
+
+ sec_flush(tsdn, &sec);
+ expect_stats_pages(tsdn, &sec, 0);
+
+ /* Flush the remaining pages. */
+ for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, i + 1);
+ }
+ sec_disable(tsdn, &sec);
+ expect_stats_pages(tsdn, &sec, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_reuse,
+ test_auto_flush,
+ test_disable,
+ test_flush,
+ test_max_alloc_respected,
+ test_expand_shrink_delegate,
+ test_nshards_0,
+ test_stats_simple,
+ test_stats_auto_flush,
+ test_stats_manual_flush);
+}
diff --git a/deps/jemalloc/test/unit/seq.c b/deps/jemalloc/test/unit/seq.c
new file mode 100644
index 0000000..06ed683
--- /dev/null
+++ b/deps/jemalloc/test/unit/seq.c
@@ -0,0 +1,95 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/seq.h"
+
+typedef struct data_s data_t;
+struct data_s {
+ int arr[10];
+};
+
+static void
+set_data(data_t *data, int num) {
+ for (int i = 0; i < 10; i++) {
+ data->arr[i] = num;
+ }
+}
+
+static void
+expect_data(data_t *data) {
+ int num = data->arr[0];
+ for (int i = 0; i < 10; i++) {
+ expect_d_eq(num, data->arr[i], "Data consistency error");
+ }
+}
+
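+/*
+ * seq_define(data_t, data) generates the seq_data_t type and the
+ * seq_store_data()/seq_try_load_data() helpers used below: loads either
+ * return a consistent snapshot or fail, so readers never see a torn write.
+ */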
+seq_define(data_t, data)
+
+typedef struct thd_data_s thd_data_t;
+struct thd_data_s {
+ seq_data_t data;
+};
+
+static void *
+seq_reader_thd(void *arg) {
+ thd_data_t *thd_data = (thd_data_t *)arg;
+ int iter = 0;
+ data_t local_data;
+ while (iter < 1000 * 1000 - 1) {
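+ /*
+ * A failed load only means the writer raced us; retry rather than
+ * treating it as an error.
+ */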
+ bool success = seq_try_load_data(&local_data, &thd_data->data);
+ if (success) {
+ expect_data(&local_data);
+ expect_d_le(iter, local_data.arr[0],
+ "Seq read went back in time.");
+ iter = local_data.arr[0];
+ }
+ }
+ return NULL;
+}
+
+static void *
+seq_writer_thd(void *arg) {
+ thd_data_t *thd_data = (thd_data_t *)arg;
+ data_t local_data;
+ memset(&local_data, 0, sizeof(local_data));
+ for (int i = 0; i < 1000 * 1000; i++) {
+ set_data(&local_data, i);
+ seq_store_data(&thd_data->data, &local_data);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_seq_threaded) {
+ thd_data_t thd_data;
+ memset(&thd_data, 0, sizeof(thd_data));
+
+ thd_t reader;
+ thd_t writer;
+
+ thd_create(&reader, seq_reader_thd, &thd_data);
+ thd_create(&writer, seq_writer_thd, &thd_data);
+
+ thd_join(reader, NULL);
+ thd_join(writer, NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_seq_simple) {
+ data_t data;
+ seq_data_t seq;
+ memset(&seq, 0, sizeof(seq));
+ for (int i = 0; i < 1000 * 1000; i++) {
+ set_data(&data, i);
+ seq_store_data(&seq, &data);
+ set_data(&data, 0);
+ bool success = seq_try_load_data(&data, &seq);
+ expect_b_eq(success, true, "Failed non-racing read");
+ expect_data(&data);
+ }
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_seq_simple,
+ test_seq_threaded);
+}
diff --git a/deps/jemalloc/test/unit/size_check.c b/deps/jemalloc/test/unit/size_check.c
new file mode 100644
index 0000000..accdc40
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_check.c
@@ -0,0 +1,79 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+#define SMALL_SIZE1 SC_SMALL_MAXCLASS
+#define SMALL_SIZE2 (SC_SMALL_MAXCLASS / 2)
+
+#define LARGE_SIZE1 SC_LARGE_MINCLASS
+#define LARGE_SIZE2 (LARGE_SIZE1 * 2)
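+
+/*
+ * Each test below allocates one size and frees it while claiming the other;
+ * with size checks enabled, the mismatch should trip the fake abort hook.
+ */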
+
+void *
+test_invalid_size_pre(size_t sz) {
+ safety_check_set_abort(&fake_abort);
+
+ fake_abort_called = false;
+ void *ptr = malloc(sz);
+ assert_ptr_not_null(ptr, "Unexpected failure");
+
+ return ptr;
+}
+
+void
+test_invalid_size_post(void) {
+ expect_true(fake_abort_called, "Safety check didn't fire");
+ safety_check_set_abort(NULL);
+}
+
+TEST_BEGIN(test_invalid_size_sdallocx) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ sdallocx(ptr, SMALL_SIZE2, 0);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ sdallocx(ptr, LARGE_SIZE2, 0);
+ test_invalid_size_post();
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ sdallocx(ptr, SMALL_SIZE2, MALLOCX_TCACHE_NONE);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ sdallocx(ptr, LARGE_SIZE2, MALLOCX_TCACHE_NONE);
+ test_invalid_size_post();
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_noflags) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ je_sdallocx_noflags(ptr, SMALL_SIZE2);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ je_sdallocx_noflags(ptr, LARGE_SIZE2);
+ test_invalid_size_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_invalid_size_sdallocx,
+ test_invalid_size_sdallocx_nonzero_flag,
+ test_invalid_size_sdallocx_noflags);
+}
diff --git a/deps/jemalloc/test/unit/size_check.sh b/deps/jemalloc/test/unit/size_check.sh
new file mode 100644
index 0000000..352d110
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_check.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:false"
+fi
diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c
new file mode 100644
index 0000000..c70eb59
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_classes.c
@@ -0,0 +1,188 @@
+#include "test/jemalloc_test.h"
+
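+/*
+ * Read arenas.nlextents, then query arenas.lextent.<nlextents-1>.size via its
+ * MIB to obtain the largest supported size class.
+ */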
+static size_t
+get_max_size_class(void) {
+ unsigned nlextents;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nlextents - 1;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib() error");
+
+ return max_size_class;
+}
+
+TEST_BEGIN(test_size_classes) {
+ size_t size_class, max_size_class;
+ szind_t index, max_index;
+
+ max_size_class = get_max_size_class();
+ max_index = sz_size2index(max_size_class);
+
+ for (index = 0, size_class = sz_index2size(index); index < max_index ||
+ size_class < max_size_class; index++, size_class =
+ sz_index2size(index)) {
+ expect_true(index < max_index,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+ expect_true(size_class < max_size_class,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+
+ expect_u_eq(index, sz_size2index(size_class),
+ "sz_size2index() does not reverse sz_index2size(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));
+ expect_zu_eq(size_class,
+ sz_index2size(sz_size2index(size_class)),
+ "sz_index2size() does not reverse sz_size2index(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));
+
+ expect_u_eq(index+1, sz_size2index(size_class+1),
+ "Next size_class does not round up properly");
+
+ expect_zu_eq(size_class, (index > 0) ?
+ sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
+ "sz_s2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
+ expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
+ "sz_s2u() does not round up to next size class");
+ }
+
+ expect_u_eq(index, sz_size2index(sz_index2size(index)),
+ "sz_size2index() does not reverse sz_index2size()");
+ expect_zu_eq(max_size_class, sz_index2size(
+ sz_size2index(max_size_class)),
+ "sz_index2size() does not reverse sz_size2index()");
+
+ expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
+ "sz_s2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_psize_classes) {
+ size_t size_class, max_psz;
+ pszind_t pind, max_pind;
+
+ max_psz = get_max_size_class() + PAGE;
+ max_pind = sz_psz2ind(max_psz);
+
+ for (pind = 0, size_class = sz_pind2sz(pind);
+ pind < max_pind || size_class < max_psz;
+ pind++, size_class = sz_pind2sz(pind)) {
+ expect_true(pind < max_pind,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+ expect_true(size_class < max_psz,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+
+ expect_u_eq(pind, sz_psz2ind(size_class),
+ "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));
+ expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
+ "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));
+
+ if (size_class == SC_LARGE_MAXCLASS) {
+ expect_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
+ "Next size_class does not round up properly");
+ } else {
+ expect_u_eq(pind + 1, sz_psz2ind(size_class + 1),
+ "Next size_class does not round up properly");
+ }
+
+ expect_zu_eq(size_class, (pind > 0) ?
+ sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
+ "sz_psz2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
+ expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
+ "sz_psz2u() does not round up to next size class");
+ }
+
+ expect_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
+ "sz_psz2ind() does not reverse sz_pind2sz()");
+ expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
+ "sz_pind2sz() does not reverse sz_psz2ind()");
+
+ expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
+ "sz_psz2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ expect_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
+}
+TEST_END
+
+TEST_BEGIN(test_overflow) {
+ size_t max_size_class, max_psz;
+
+ max_size_class = get_max_size_class();
+ max_psz = max_size_class + PAGE;
+
+ expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+
+ expect_zu_eq(sz_s2u(max_size_class+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ expect_zu_eq(sz_s2u(SIZE_T_MAX), 0,
+ "sz_s2u() should return 0 on overflow");
+
+ expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+
+ expect_zu_eq(sz_psz2u(max_size_class+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
+ " size");
+ expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
+ "size");
+ expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_size_classes,
+ test_psize_classes,
+ test_overflow);
+}
diff --git a/deps/jemalloc/test/unit/slab.c b/deps/jemalloc/test/unit/slab.c
new file mode 100644
index 0000000..70fc5c7
--- /dev/null
+++ b/deps/jemalloc/test/unit/slab.c
@@ -0,0 +1,39 @@
+#include "test/jemalloc_test.h"
+
+#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
+
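+/*
+ * For each bin, carve a fake slab out of a mallocx()ed region and verify that
+ * arena_slab_regind() recovers the correct region index for every region.
+ */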
+TEST_BEGIN(test_arena_slab_regind) {
+ szind_t binind;
+
+ for (binind = 0; binind < SC_NBINS; binind++) {
+ size_t regind;
+ edata_t slab;
+ const bin_info_t *bin_info = &bin_infos[binind];
+ edata_init(&slab, INVALID_ARENA_IND,
+ mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
+ bin_info->slab_size, true,
+ binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC,
+ EXTENT_NOT_HEAD);
+ expect_ptr_not_null(edata_addr_get(&slab),
+ "Unexpected malloc() failure");
+ arena_dalloc_bin_locked_info_t dalloc_info;
+ arena_dalloc_bin_locked_begin(&dalloc_info, binind);
+ for (regind = 0; regind < bin_info->nregs; regind++) {
+ void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
+ (bin_info->reg_size * regind));
+ expect_zu_eq(arena_slab_regind(&dalloc_info, binind,
+ &slab, reg),
+ regind,
+ "Incorrect region index computed for size %zu",
+ bin_info->reg_size);
+ }
+ free(edata_addr_get(&slab));
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_arena_slab_regind);
+}
diff --git a/deps/jemalloc/test/unit/smoothstep.c b/deps/jemalloc/test/unit/smoothstep.c
new file mode 100644
index 0000000..588c9f4
--- /dev/null
+++ b/deps/jemalloc/test/unit/smoothstep.c
@@ -0,0 +1,102 @@
+#include "test/jemalloc_test.h"
+
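+/*
+ * SMOOTHSTEP expands to STEP(step, h, x, y) rows; defining STEP to emit only
+ * "h," flattens it into a table of the fixed-point y values.
+ */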
+static const uint64_t smoothstep_tab[] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
+
+TEST_BEGIN(test_smoothstep_integral) {
+ uint64_t sum, min, max;
+ unsigned i;
+
+ /*
+ * The integral of smoothstep in the [0..1] range equals 1/2. Verify
+ * that the fixed point representation's integral is no more than
+ * rounding error distant from 1/2. Regarding rounding, each table
+ * element is rounded down to the nearest fixed point value, so the
+ * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
+ */
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += smoothstep_tab[i];
+ }
+
+ max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
+ min = max - SMOOTHSTEP_NSTEPS;
+
+ expect_u64_ge(sum, min,
+ "Integral too small, even accounting for truncation");
+ expect_u64_le(sum, max, "Integral exceeds 1/2");
+ if (false) {
+ malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
+ max - sum, SMOOTHSTEP_NSTEPS);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_monotonic) {
+ uint64_t prev_h;
+ unsigned i;
+
+ /*
+ * The smoothstep function is monotonic in [0..1], i.e. its slope is
+ * non-negative. In practice we want to parametrize table generation
+ * such that piecewise slope is greater than zero, but do not require
+ * that here.
+ */
+ prev_h = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ uint64_t h = smoothstep_tab[i];
+ expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
+ prev_h = h;
+ }
+ expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
+ (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_slope) {
+ uint64_t prev_h, prev_delta;
+ unsigned i;
+
+ /*
+ * The smoothstep slope strictly increases until x=0.5, and then
+ * strictly decreases until x=1.0. Verify the slightly weaker
+ * requirement of monotonicity, so that inadequate table precision does
+ * not cause false test failures.
+ */
+ prev_h = 0;
+ prev_delta = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = h - prev_h;
+ expect_u64_ge(delta, prev_delta,
+ "Slope must monotonically increase in 0.0 <= x <= 0.5, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+
+ prev_h = KQU(1) << SMOOTHSTEP_BFP;
+ prev_delta = 0;
+ for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = prev_h - h;
+ expect_u64_ge(delta, prev_delta,
+ "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_smoothstep_integral,
+ test_smoothstep_monotonic,
+ test_smoothstep_slope);
+}
diff --git a/deps/jemalloc/test/unit/spin.c b/deps/jemalloc/test/unit/spin.c
new file mode 100644
index 0000000..b965f74
--- /dev/null
+++ b/deps/jemalloc/test/unit/spin.c
@@ -0,0 +1,18 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/spin.h"
+
+TEST_BEGIN(test_spin) {
+ spin_t spinner = SPIN_INITIALIZER;
+
+ for (unsigned i = 0; i < 100; i++) {
+ spin_adaptive(&spinner);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_spin);
+}
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
new file mode 100644
index 0000000..bbdbd18
--- /dev/null
+++ b/deps/jemalloc/test/unit/stats.c
@@ -0,0 +1,431 @@
+#include "test/jemalloc_test.h"
+
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+TEST_BEGIN(test_stats_summary) {
+ size_t sz, allocated, active, resident, mapped;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_zu_le(allocated, active,
+ "allocated should be no larger than active");
+ expect_zu_lt(active, resident,
+ "active should be less than resident");
+ expect_zu_lt(active, mapped,
+ "active should be less than mapped");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_large) {
+ void *p;
+ uint64_t epoch;
+ size_t allocated;
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.arenas.0.large.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
+ (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ expect_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ expect_u64_le(nmalloc, nrequests,
+ "nmalloc should no larger than nrequests");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_summary) {
+ void *little, *large;
+ uint64_t epoch;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+ size_t mapped;
+ uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
+ uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
+
+ little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
+ expect_ptr_not_null(little, "Unexpected mallocx() failure");
+ large = mallocx((1U << SC_LG_LARGE_MINCLASS),
+ MALLOCX_ARENA(0));
+ expect_ptr_not_null(large, "Unexpected mallocx() failure");
+
+ dallocx(little, 0);
+ dallocx(large, 0);
+
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
+ 0), expected, "Unexepected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
+ (void *)&dirty_npurge, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
+ (void *)&dirty_nmadvise, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
+ (void *)&dirty_purged, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
+ (void *)&muzzy_npurge, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
+ (void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
+ (void *)&muzzy_purged, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+
+ if (config_stats) {
+ if (!is_background_thread_enabled() && !opt_hpa) {
+ expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
+ "At least one purge should have occurred");
+ }
+ expect_u64_le(dirty_nmadvise, dirty_purged,
+ "dirty_nmadvise should be no greater than dirty_purged");
+ expect_u64_le(muzzy_nmadvise, muzzy_purged,
+ "muzzy_nmadvise should be no greater than muzzy_purged");
+ }
+}
+TEST_END
+
+void *
+thd_start(void *arg) {
+ return NULL;
+}
+
+static void
+no_lazy_lock(void) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small) {
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
+
+ p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.arenas.0.small.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
+ (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ expect_u64_gt(nmalloc, 0,
+ "nmalloc should be no greater than zero");
+ expect_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ expect_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large) {
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.arenas.0.large.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ expect_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ expect_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static void
+gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
+ sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name);
+}
+
+TEST_BEGIN(test_stats_arenas_bins) {
+ void *p;
+ size_t sz, curslabs, curregs, nonfull_slabs;
+ uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nslabs, nreslabs;
+ int expected = config_stats ? 0 : ENOENT;
+
+ /* Make sure allocation below isn't satisfied by tcache. */
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ unsigned arena_ind, old_arena_ind;
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Arena creation failure");
+ sz = sizeof(arena_ind);
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+
+ p = malloc(bin_infos[0].reg_size);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ char cmd[128];
+ sz = sizeof(uint64_t);
+ gen_mallctl_str(cmd, "nmalloc", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "ndalloc", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nrequests", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ gen_mallctl_str(cmd, "curregs", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ gen_mallctl_str(cmd, "nfills", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nflushes", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ gen_mallctl_str(cmd, "nslabs", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nreslabs", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ gen_mallctl_str(cmd, "curslabs", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
+ expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ expect_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ expect_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ expect_zu_gt(curregs, 0,
+ "allocated should be greater than zero");
+ if (opt_tcache) {
+ expect_u64_gt(nfills, 0,
+ "At least one fill should have occurred");
+ expect_u64_gt(nflushes, 0,
+ "At least one flush should have occurred");
+ }
+ expect_u64_gt(nslabs, 0,
+ "At least one slab should have been allocated");
+ expect_zu_gt(curslabs, 0,
+ "At least one slab should be currently allocated");
+ expect_zu_eq(nonfull_slabs, 0,
+ "slabs_nonfull should be empty");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lextents) {
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc;
+ size_t curlextents, sz, hsize;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ p = mallocx(hsize, MALLOCX_ARENA(0));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
+ (void *)&nmalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
+ (void *)&ndalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
+ (void *)&curlextents, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ expect_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ expect_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ expect_u64_gt(curlextents, 0,
+ "At least one extent should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
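+/*
+ * Free a usize-sized allocation and check that tcache_bytes (plus any stashed
+ * bytes) grows by exactly usize, i.e. the free landed in the thread cache.
+ */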
+static void
+test_tcache_bytes_for_usize(size_t usize) {
+ uint64_t epoch;
+ size_t tcache_bytes, tcache_stashed_bytes;
+ size_t sz = sizeof(tcache_bytes);
+
+ void *ptr = mallocx(usize, 0);
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
+ dallocx(ptr, 0);
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
+ assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
+ usize, "Incorrectly attributed a free");
+}
+
+TEST_BEGIN(test_stats_tcache_bytes_small) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_tcache_max < SC_SMALL_MAXCLASS);
+
+ test_tcache_bytes_for_usize(SC_SMALL_MAXCLASS);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_tcache_bytes_large) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_tcache_max < SC_LARGE_MINCLASS);
+
+ test_tcache_bytes_for_usize(SC_LARGE_MINCLASS);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_stats_summary,
+ test_stats_large,
+ test_stats_arenas_summary,
+ test_stats_arenas_small,
+ test_stats_arenas_large,
+ test_stats_arenas_bins,
+ test_stats_arenas_lextents,
+ test_stats_tcache_bytes_small,
+ test_stats_tcache_bytes_large);
+}
diff --git a/deps/jemalloc/test/unit/stats_print.c b/deps/jemalloc/test/unit/stats_print.c
new file mode 100644
index 0000000..3b31775
--- /dev/null
+++ b/deps/jemalloc/test/unit/stats_print.c
@@ -0,0 +1,999 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/util.h"
+
+typedef enum {
+ TOKEN_TYPE_NONE,
+ TOKEN_TYPE_ERROR,
+ TOKEN_TYPE_EOI,
+ TOKEN_TYPE_NULL,
+ TOKEN_TYPE_FALSE,
+ TOKEN_TYPE_TRUE,
+ TOKEN_TYPE_LBRACKET,
+ TOKEN_TYPE_RBRACKET,
+ TOKEN_TYPE_LBRACE,
+ TOKEN_TYPE_RBRACE,
+ TOKEN_TYPE_COLON,
+ TOKEN_TYPE_COMMA,
+ TOKEN_TYPE_STRING,
+ TOKEN_TYPE_NUMBER
+} token_type_t;
+
+typedef struct parser_s parser_t;
+typedef struct {
+ parser_t *parser;
+ token_type_t token_type;
+ size_t pos;
+ size_t len;
+ size_t line;
+ size_t col;
+} token_t;
+
+struct parser_s {
+ bool verbose;
+ char *buf; /* '\0'-terminated. */
+ size_t len; /* Number of characters preceding '\0' in buf. */
+ size_t pos;
+ size_t line;
+ size_t col;
+ token_t token;
+};
+
+static void
+token_init(token_t *token, parser_t *parser, token_type_t token_type,
+ size_t pos, size_t len, size_t line, size_t col) {
+ token->parser = parser;
+ token->token_type = token_type;
+ token->pos = pos;
+ token->len = len;
+ token->line = line;
+ token->col = col;
+}
+
+static void
+token_error(token_t *token) {
+ if (!token->parser->verbose) {
+ return;
+ }
+ switch (token->token_type) {
+ case TOKEN_TYPE_NONE:
+ not_reached();
+ case TOKEN_TYPE_ERROR:
+ malloc_printf("%zu:%zu: Unexpected character in token: ",
+ token->line, token->col);
+ break;
+ default:
+ malloc_printf("%zu:%zu: Unexpected token: ", token->line,
+ token->col);
+ break;
+ }
+ UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO,
+ &token->parser->buf[token->pos], token->len);
+ malloc_printf("\n");
+}
+
+static void
+parser_init(parser_t *parser, bool verbose) {
+ parser->verbose = verbose;
+ parser->buf = NULL;
+ parser->len = 0;
+ parser->pos = 0;
+ parser->line = 1;
+ parser->col = 0;
+}
+
+static void
+parser_fini(parser_t *parser) {
+ if (parser->buf != NULL) {
+ dallocx(parser->buf, MALLOCX_TCACHE_NONE);
+ }
+}
+
+static bool
+parser_append(parser_t *parser, const char *str) {
+ size_t len = strlen(str);
+ char *buf = (parser->buf == NULL) ? mallocx(len + 1,
+ MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
+ MALLOCX_TCACHE_NONE);
+ if (buf == NULL) {
+ return true;
+ }
+ memcpy(&buf[parser->len], str, len + 1);
+ parser->buf = buf;
+ parser->len += len;
+ return false;
+}
+
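+/*
+ * Hand-rolled JSON tokenizer: a character-at-a-time state machine that leaves
+ * the next token in parser->token and returns true on a lexing error.
+ */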
+static bool
+parser_tokenize(parser_t *parser) {
+ enum {
+ STATE_START,
+ STATE_EOI,
+ STATE_N, STATE_NU, STATE_NUL, STATE_NULL,
+ STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE,
+ STATE_T, STATE_TR, STATE_TRU, STATE_TRUE,
+ STATE_LBRACKET,
+ STATE_RBRACKET,
+ STATE_LBRACE,
+ STATE_RBRACE,
+ STATE_COLON,
+ STATE_COMMA,
+ STATE_CHARS,
+ STATE_CHAR_ESCAPE,
+ STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD,
+ STATE_STRING,
+ STATE_MINUS,
+ STATE_LEADING_ZERO,
+ STATE_DIGITS,
+ STATE_DECIMAL,
+ STATE_FRAC_DIGITS,
+ STATE_EXP,
+ STATE_EXP_SIGN,
+ STATE_EXP_DIGITS,
+ STATE_ACCEPT
+ } state = STATE_START;
+ size_t token_pos JEMALLOC_CC_SILENCE_INIT(0);
+ size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
+ size_t token_col JEMALLOC_CC_SILENCE_INIT(0);
+
+ expect_zu_le(parser->pos, parser->len,
+ "Position is past end of buffer");
+
+ while (state != STATE_ACCEPT) {
+ char c = parser->buf[parser->pos];
+
+ switch (state) {
+ case STATE_START:
+ token_pos = parser->pos;
+ token_line = parser->line;
+ token_col = parser->col;
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ break;
+ case '\0':
+ state = STATE_EOI;
+ break;
+ case 'n':
+ state = STATE_N;
+ break;
+ case 'f':
+ state = STATE_F;
+ break;
+ case 't':
+ state = STATE_T;
+ break;
+ case '[':
+ state = STATE_LBRACKET;
+ break;
+ case ']':
+ state = STATE_RBRACKET;
+ break;
+ case '{':
+ state = STATE_LBRACE;
+ break;
+ case '}':
+ state = STATE_RBRACE;
+ break;
+ case ':':
+ state = STATE_COLON;
+ break;
+ case ',':
+ state = STATE_COMMA;
+ break;
+ case '"':
+ state = STATE_CHARS;
+ break;
+ case '-':
+ state = STATE_MINUS;
+ break;
+ case '0':
+ state = STATE_LEADING_ZERO;
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EOI:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_EOI, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_N:
+ switch (c) {
+ case 'u':
+ state = STATE_NU;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NU:
+ switch (c) {
+ case 'l':
+ state = STATE_NUL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NUL:
+ switch (c) {
+ case 'l':
+ state = STATE_NULL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NULL:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser, TOKEN_TYPE_NULL,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_F:
+ switch (c) {
+ case 'a':
+ state = STATE_FA;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FA:
+ switch (c) {
+ case 'l':
+ state = STATE_FAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FAL:
+ switch (c) {
+ case 's':
+ state = STATE_FALS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FALS:
+ switch (c) {
+ case 'e':
+ state = STATE_FALSE;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FALSE:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_FALSE, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_T:
+ switch (c) {
+ case 'r':
+ state = STATE_TR;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TR:
+ switch (c) {
+ case 'u':
+ state = STATE_TRU;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TRU:
+ switch (c) {
+ case 'e':
+ state = STATE_TRUE;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TRUE:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser, TOKEN_TYPE_TRUE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_LBRACKET:
+ token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_RBRACKET:
+ token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_LBRACE:
+ token_init(&parser->token, parser, TOKEN_TYPE_LBRACE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_RBRACE:
+ token_init(&parser->token, parser, TOKEN_TYPE_RBRACE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_COLON:
+ token_init(&parser->token, parser, TOKEN_TYPE_COLON,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_COMMA:
+ token_init(&parser->token, parser, TOKEN_TYPE_COMMA,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_CHARS:
+ switch (c) {
+ case '\\':
+ state = STATE_CHAR_ESCAPE;
+ break;
+ case '"':
+ state = STATE_STRING;
+ break;
+ case 0x00: case 0x01: case 0x02: case 0x03: case 0x04:
+ case 0x05: case 0x06: case 0x07: case 0x08: case 0x09:
+ case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
+ case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13:
+ case 0x14: case 0x15: case 0x16: case 0x17: case 0x18:
+ case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d:
+ case 0x1e: case 0x1f:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ default:
+ break;
+ }
+ break;
+ case STATE_CHAR_ESCAPE:
+ switch (c) {
+ case '"': case '\\': case '/': case 'b': case 'n':
+ case 'r': case 't':
+ state = STATE_CHARS;
+ break;
+ case 'u':
+ state = STATE_CHAR_U;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_U:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UDD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UDD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UDDD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UDDD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHARS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_STRING:
+ token_init(&parser->token, parser, TOKEN_TYPE_STRING,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_MINUS:
+ switch (c) {
+ case '0':
+ state = STATE_LEADING_ZERO;
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_LEADING_ZERO:
+ switch (c) {
+ case '.':
+ state = STATE_DECIMAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ case '.':
+ state = STATE_DECIMAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_DECIMAL:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_FRAC_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FRAC_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ case 'e': case 'E':
+ state = STATE_EXP;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_EXP:
+ switch (c) {
+ case '-': case '+':
+ state = STATE_EXP_SIGN;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_EXP_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EXP_SIGN:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_EXP_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EXP_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ default:
+ not_reached();
+ }
+
+ if (state != STATE_ACCEPT) {
+ if (c == '\n') {
+ parser->line++;
+ parser->col = 0;
+ } else {
+ parser->col++;
+ }
+ parser->pos++;
+ }
+ }
+ return false;
+}
+
+static bool parser_parse_array(parser_t *parser);
+static bool parser_parse_object(parser_t *parser);
+
+static bool
+parser_parse_value(parser_t *parser) {
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_NULL:
+ case TOKEN_TYPE_FALSE:
+ case TOKEN_TYPE_TRUE:
+ case TOKEN_TYPE_STRING:
+ case TOKEN_TYPE_NUMBER:
+ return false;
+ case TOKEN_TYPE_LBRACE:
+ return parser_parse_object(parser);
+ case TOKEN_TYPE_LBRACKET:
+ return parser_parse_array(parser);
+ default:
+ return true;
+ }
+ not_reached();
+}
+
+static bool
+parser_parse_pair(parser_t *parser) {
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+ "Pair should start with string");
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_COLON:
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ return parser_parse_value(parser);
+ default:
+ return true;
+ }
+}
+
+static bool
+parser_parse_values(parser_t *parser) {
+ if (parser_parse_value(parser)) {
+ return true;
+ }
+
+ while (true) {
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_COMMA:
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ if (parser_parse_value(parser)) {
+ return true;
+ }
+ break;
+ case TOKEN_TYPE_RBRACKET:
+ return false;
+ default:
+ return true;
+ }
+ }
+}
+
+static bool
+parser_parse_array(parser_t *parser) {
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
+ "Array should start with [");
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_RBRACKET:
+ return false;
+ default:
+ return parser_parse_values(parser);
+ }
+ not_reached();
+}
+
+static bool
+parser_parse_pairs(parser_t *parser) {
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+ "Object should start with string");
+ if (parser_parse_pair(parser)) {
+ return true;
+ }
+
+ while (true) {
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_COMMA:
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_STRING:
+ if (parser_parse_pair(parser)) {
+ return true;
+ }
+ break;
+ default:
+ return true;
+ }
+ break;
+ case TOKEN_TYPE_RBRACE:
+ return false;
+ default:
+ return true;
+ }
+ }
+}
+
+static bool
+parser_parse_object(parser_t *parser) {
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
+ "Object should start with {");
+ if (parser_tokenize(parser)) {
+ return true;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_STRING:
+ return parser_parse_pairs(parser);
+ case TOKEN_TYPE_RBRACE:
+ return false;
+ default:
+ return true;
+ }
+ not_reached();
+}
+
+static bool
+parser_parse(parser_t *parser) {
+ if (parser_tokenize(parser)) {
+ goto label_error;
+ }
+ if (parser_parse_value(parser)) {
+ goto label_error;
+ }
+
+ if (parser_tokenize(parser)) {
+ goto label_error;
+ }
+ switch (parser->token.token_type) {
+ case TOKEN_TYPE_EOI:
+ return false;
+ default:
+ goto label_error;
+ }
+ not_reached();
+
+label_error:
+ token_error(&parser->token);
+ return true;
+}
+
+TEST_BEGIN(test_json_parser) {
+ size_t i;
+ const char *invalid_inputs[] = {
+		/* Tokenizer error test cases. */
+ "{ \"string\": X }",
+ "{ \"string\": nXll }",
+ "{ \"string\": nuXl }",
+ "{ \"string\": nulX }",
+ "{ \"string\": nullX }",
+ "{ \"string\": fXlse }",
+ "{ \"string\": faXse }",
+ "{ \"string\": falXe }",
+ "{ \"string\": falsX }",
+ "{ \"string\": falseX }",
+ "{ \"string\": tXue }",
+ "{ \"string\": trXe }",
+ "{ \"string\": truX }",
+ "{ \"string\": trueX }",
+ "{ \"string\": \"\n\" }",
+ "{ \"string\": \"\\z\" }",
+ "{ \"string\": \"\\uX000\" }",
+ "{ \"string\": \"\\u0X00\" }",
+ "{ \"string\": \"\\u00X0\" }",
+ "{ \"string\": \"\\u000X\" }",
+ "{ \"string\": -X }",
+ "{ \"string\": 0.X }",
+ "{ \"string\": 0.0eX }",
+ "{ \"string\": 0.0e+X }",
+
+ /* Parser error test cases. */
+ "{\"string\": }",
+ "{\"string\" }",
+ "{\"string\": [ 0 }",
+ "{\"string\": {\"a\":0, 1 } }",
+ "{\"string\": {\"a\":0: } }",
+ "{",
+ "{}{",
+ };
+ const char *valid_inputs[] = {
+ /* Token tests. */
+ "null",
+ "false",
+ "true",
+ "{}",
+ "{\"a\": 0}",
+ "[]",
+ "[0, 1]",
+ "0",
+ "1",
+ "10",
+ "-10",
+ "10.23",
+ "10.23e4",
+ "10.23e-4",
+ "10.23e+4",
+ "10.23E4",
+ "10.23E-4",
+ "10.23E+4",
+ "-10.23",
+ "-10.23e4",
+ "-10.23e-4",
+ "-10.23e+4",
+ "-10.23E4",
+ "-10.23E-4",
+ "-10.23E+4",
+ "\"value\"",
+ "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"",
+
+ /* Parser test with various nesting. */
+ "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}",
+ };
+
+ for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) {
+ const char *input = invalid_inputs[i];
+ parser_t parser;
+ parser_init(&parser, false);
+ expect_false(parser_append(&parser, input),
+ "Unexpected input appending failure");
+ expect_true(parser_parse(&parser),
+ "Unexpected parse success for input: %s", input);
+ parser_fini(&parser);
+ }
+
+ for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) {
+ const char *input = valid_inputs[i];
+ parser_t parser;
+ parser_init(&parser, true);
+ expect_false(parser_append(&parser, input),
+ "Unexpected input appending failure");
+ expect_false(parser_parse(&parser),
+ "Unexpected parse error for input: %s", input);
+ parser_fini(&parser);
+ }
+}
+TEST_END
+
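+/* malloc_stats_print() write callback: append output to the parser's buffer. */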
+void
+write_cb(void *opaque, const char *str) {
+ parser_t *parser = (parser_t *)opaque;
+ if (parser_append(parser, str)) {
+ test_fail("Unexpected input appending failure");
+ }
+}
+
+TEST_BEGIN(test_stats_print_json) {
+ const char *opts[] = {
+ "J",
+ "Jg",
+ "Jm",
+ "Jd",
+ "Jmd",
+ "Jgd",
+ "Jgm",
+ "Jgmd",
+ "Ja",
+ "Jb",
+ "Jl",
+ "Jx",
+ "Jbl",
+ "Jal",
+ "Jab",
+ "Jabl",
+ "Jax",
+ "Jbx",
+ "Jlx",
+ "Jablx",
+ "Jgmdablx",
+ };
+ unsigned arena_ind, i;
+
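+	/*
+	 * Print and parse stats under three arena configurations: the default
+	 * arenas only, after creating an extra arena, and after destroying it.
+	 */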
+ for (i = 0; i < 3; i++) {
+ unsigned j;
+
+ switch (i) {
+ case 0:
+ break;
+ case 1: {
+ size_t sz = sizeof(arena_ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind,
+ &sz, NULL, 0), 0, "Unexpected mallctl failure");
+ break;
+ } case 2: {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.destroy",
+ mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ mib[1] = arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
+ 0), 0, "Unexpected mallctlbymib failure");
+ break;
+ } default:
+ not_reached();
+ }
+
+ for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) {
+ parser_t parser;
+
+ parser_init(&parser, true);
+ malloc_stats_print(write_cb, (void *)&parser, opts[j]);
+ expect_false(parser_parse(&parser),
+ "Unexpected parse error, opts=\"%s\"", opts[j]);
+ parser_fini(&parser);
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_json_parser,
+ test_stats_print_json);
+}
diff --git a/deps/jemalloc/test/unit/sz.c b/deps/jemalloc/test/unit/sz.c
new file mode 100644
index 0000000..8ae04b9
--- /dev/null
+++ b/deps/jemalloc/test/unit/sz.c
@@ -0,0 +1,66 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_sz_psz2ind) {
+ /*
+	 * Test the page size classes that precede the first regular group, in
+	 * which every size class is a multiple of the page size.
+ * For x86_64 Linux, it's 4096, 8192, 12288, 16384, with corresponding
+ * pszind 0, 1, 2 and 3.
+ */
+ for (size_t i = 0; i < SC_NGROUP; i++) {
+ for (size_t psz = i * PAGE + 1; psz <= (i + 1) * PAGE; psz++) {
+ pszind_t ind = sz_psz2ind(psz);
+ expect_zu_eq(ind, i, "Got %u as sz_psz2ind of %zu", ind,
+ psz);
+ }
+ }
+
+ sc_data_t data;
+ memset(&data, 0, sizeof(data));
+ sc_data_init(&data);
+ /*
+ * 'base' is the base of the first regular group with all size classes
+ * divisible by page size.
+ * For x86_64 Linux, it's 16384, and base_ind is 36.
+ */
+ size_t base_psz = 1 << (SC_LG_NGROUP + LG_PAGE);
+ size_t base_ind = 0;
+ while (base_ind < SC_NSIZES &&
+ reg_size_compute(data.sc[base_ind].lg_base,
+ data.sc[base_ind].lg_delta,
+ data.sc[base_ind].ndelta) < base_psz) {
+ base_ind++;
+ }
+ expect_zu_eq(
+ reg_size_compute(data.sc[base_ind].lg_base,
+ data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
+ base_psz, "Size class equal to %zu not found", base_psz);
+ /*
+	 * Test sizes that fall into the groups after 'base'. The increment
+	 * is PAGE / 3 to keep the execution time reasonable.
+ */
+ base_ind -= SC_NGROUP;
+ for (size_t psz = base_psz; psz <= 64 * 1024 * 1024; psz += PAGE / 3) {
+ pszind_t ind = sz_psz2ind(psz);
+ sc_t gt_sc = data.sc[ind + base_ind];
+ expect_zu_gt(psz,
+ reg_size_compute(gt_sc.lg_base, gt_sc.lg_delta,
+ gt_sc.ndelta),
+ "Got %u as sz_psz2ind of %zu", ind, psz);
+ sc_t le_sc = data.sc[ind + base_ind + 1];
+ expect_zu_le(psz,
+ reg_size_compute(le_sc.lg_base, le_sc.lg_delta,
+ le_sc.ndelta),
+ "Got %u as sz_psz2ind of %zu", ind, psz);
+ }
+
+ pszind_t max_ind = sz_psz2ind(SC_LARGE_MAXCLASS + 1);
+ expect_lu_eq(max_ind, SC_NPSIZES,
+ "Got %u as sz_psz2ind of %llu", max_ind, SC_LARGE_MAXCLASS);
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_sz_psz2ind);
+}
diff --git a/deps/jemalloc/test/unit/tcache_max.c b/deps/jemalloc/test/unit/tcache_max.c
new file mode 100644
index 0000000..1f657c8
--- /dev/null
+++ b/deps/jemalloc/test/unit/tcache_max.c
@@ -0,0 +1,175 @@
+#include "test/jemalloc_test.h"
+#include "test/san.h"
+
+const char *malloc_conf = TEST_SAN_UAF_ALIGN_DISABLE;
+
+enum {
+ alloc_option_start = 0,
+ use_malloc = 0,
+ use_mallocx,
+ alloc_option_end
+};
+
+enum {
+ dalloc_option_start = 0,
+ use_free = 0,
+ use_dallocx,
+ use_sdallocx,
+ dalloc_option_end
+};
+
+static unsigned alloc_option, dalloc_option;
+static size_t tcache_max;
+
+static void *
+alloc_func(size_t sz) {
+ void *ret;
+
+ switch (alloc_option) {
+ case use_malloc:
+ ret = malloc(sz);
+ break;
+ case use_mallocx:
+ ret = mallocx(sz, 0);
+ break;
+ default:
+ unreachable();
+ }
+ expect_ptr_not_null(ret, "Unexpected malloc / mallocx failure");
+
+ return ret;
+}
+
+static void
+dalloc_func(void *ptr, size_t sz) {
+ switch (dalloc_option) {
+ case use_free:
+ free(ptr);
+ break;
+ case use_dallocx:
+ dallocx(ptr, 0);
+ break;
+ case use_sdallocx:
+ sdallocx(ptr, sz, 0);
+ break;
+ default:
+ unreachable();
+ }
+}
+
+static size_t
+tcache_bytes_read(void) {
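+	/* Refresh cached stats via the epoch mallctl before reading them. */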
+ uint64_t epoch;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ size_t tcache_bytes;
+ size_t sz = sizeof(tcache_bytes);
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+
+ return tcache_bytes;
+}
+
+static void
+tcache_bytes_check_update(size_t *prev, ssize_t diff) {
+ size_t tcache_bytes = tcache_bytes_read();
+	expect_zu_eq(tcache_bytes, *prev + diff, "Unexpected tcache bytes");
+
+ *prev += diff;
+}
+
+static void
+test_tcache_bytes_alloc(size_t alloc_size) {
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
+ "Unexpected tcache flush failure");
+
+ size_t usize = sz_s2u(alloc_size);
+ /* No change is expected if usize is outside of tcache_max range. */
+ bool cached = (usize <= tcache_max);
+ ssize_t diff = cached ? usize : 0;
+
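+	/*
+	 * Allocate two objects so that two frees and two (potentially cached)
+	 * re-allocations can be checked against the tcache byte counters.
+	 */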
+ void *ptr1 = alloc_func(alloc_size);
+ void *ptr2 = alloc_func(alloc_size);
+
+ size_t bytes = tcache_bytes_read();
+ dalloc_func(ptr2, alloc_size);
+ /* Expect tcache_bytes increase after dalloc */
+ tcache_bytes_check_update(&bytes, diff);
+
+ dalloc_func(ptr1, alloc_size);
+ /* Expect tcache_bytes increase again */
+ tcache_bytes_check_update(&bytes, diff);
+
+ void *ptr3 = alloc_func(alloc_size);
+ if (cached) {
+ expect_ptr_eq(ptr1, ptr3, "Unexpected cached ptr");
+ }
+ /* Expect tcache_bytes decrease after alloc */
+ tcache_bytes_check_update(&bytes, -diff);
+
+ void *ptr4 = alloc_func(alloc_size);
+ if (cached) {
+ expect_ptr_eq(ptr2, ptr4, "Unexpected cached ptr");
+ }
+ /* Expect tcache_bytes decrease again */
+ tcache_bytes_check_update(&bytes, -diff);
+
+ dalloc_func(ptr3, alloc_size);
+ tcache_bytes_check_update(&bytes, diff);
+ dalloc_func(ptr4, alloc_size);
+ tcache_bytes_check_update(&bytes, diff);
+}
+
+static void
+test_tcache_max_impl(void) {
+ size_t sz;
+ sz = sizeof(tcache_max);
+ assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+ &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+
+ /* opt.tcache_max set to 1024 in tcache_max.sh */
+	expect_zu_eq(tcache_max, 1024, "Unexpected tcache_max value");
+
+ test_tcache_bytes_alloc(1);
+ test_tcache_bytes_alloc(tcache_max - 1);
+ test_tcache_bytes_alloc(tcache_max);
+ test_tcache_bytes_alloc(tcache_max + 1);
+
+ test_tcache_bytes_alloc(PAGE - 1);
+ test_tcache_bytes_alloc(PAGE);
+ test_tcache_bytes_alloc(PAGE + 1);
+
+ size_t large;
+ sz = sizeof(large);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ test_tcache_bytes_alloc(large - 1);
+ test_tcache_bytes_alloc(large);
+ test_tcache_bytes_alloc(large + 1);
+}
+
+TEST_BEGIN(test_tcache_max) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_prof);
+ test_skip_if(san_uaf_detection_enabled());
+
+ for (alloc_option = alloc_option_start;
+ alloc_option < alloc_option_end;
+ alloc_option++) {
+ for (dalloc_option = dalloc_option_start;
+ dalloc_option < dalloc_option_end;
+ dalloc_option++) {
+ test_tcache_max_impl();
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_tcache_max);
+}
diff --git a/deps/jemalloc/test/unit/tcache_max.sh b/deps/jemalloc/test/unit/tcache_max.sh
new file mode 100644
index 0000000..4480d73
--- /dev/null
+++ b/deps/jemalloc/test/unit/tcache_max.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache_max:1024"
diff --git a/deps/jemalloc/test/unit/test_hooks.c b/deps/jemalloc/test/unit/test_hooks.c
new file mode 100644
index 0000000..8cd2b3b
--- /dev/null
+++ b/deps/jemalloc/test/unit/test_hooks.c
@@ -0,0 +1,38 @@
+#include "test/jemalloc_test.h"
+
+static bool hook_called = false;
+
+static void
+hook() {
+ hook_called = true;
+}
+
+static int
+func_to_hook(int arg1, int arg2) {
+ return arg1 + arg2;
+}
+
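+/* Route calls to func_to_hook through the test hook machinery. */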
+#define func_to_hook JEMALLOC_TEST_HOOK(func_to_hook, test_hooks_libc_hook)
+
+TEST_BEGIN(unhooked_call) {
+ test_hooks_libc_hook = NULL;
+ hook_called = false;
+ expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+ expect_false(hook_called, "Nulling out hook didn't take.");
+}
+TEST_END
+
+TEST_BEGIN(hooked_call) {
+ test_hooks_libc_hook = &hook;
+ hook_called = false;
+ expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+ expect_true(hook_called, "Hook should have executed.");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ unhooked_call,
+ hooked_call);
+}
diff --git a/deps/jemalloc/test/unit/thread_event.c b/deps/jemalloc/test/unit/thread_event.c
new file mode 100644
index 0000000..e0b88a9
--- /dev/null
+++ b/deps/jemalloc/test/unit/thread_event.c
@@ -0,0 +1,34 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_next_event_fast) {
+ tsd_t *tsd = tsd_fetch();
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+
+ te_ctx_last_event_set(&ctx, 0);
+ te_ctx_current_bytes_set(&ctx, TE_NEXT_EVENT_FAST_MAX - 8U);
+ te_ctx_next_event_set(tsd, &ctx, TE_NEXT_EVENT_FAST_MAX);
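+/* Push all enabled alloc event waits out to the fast-path maximum. */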
+#define E(event, condition, is_alloc) \
+ if (is_alloc && condition) { \
+ event##_event_wait_set(tsd, TE_NEXT_EVENT_FAST_MAX); \
+ }
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ /* Test next_event_fast rolling back to 0. */
+ void *p = malloc(16U);
+ assert_ptr_not_null(p, "malloc() failed");
+ free(p);
+
+ /* Test next_event_fast resuming to be equal to next_event. */
+ void *q = malloc(SC_LOOKUP_MAXCLASS);
+ assert_ptr_not_null(q, "malloc() failed");
+ free(q);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_next_event_fast);
+}
diff --git a/deps/jemalloc/test/unit/thread_event.sh b/deps/jemalloc/test/unit/thread_event.sh
new file mode 100644
index 0000000..8fcc7d8
--- /dev/null
+++ b/deps/jemalloc/test/unit/thread_event.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/ticker.c b/deps/jemalloc/test/unit/ticker.c
new file mode 100644
index 0000000..0dd7786
--- /dev/null
+++ b/deps/jemalloc/test/unit/ticker.c
@@ -0,0 +1,100 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ticker.h"
+
+TEST_BEGIN(test_ticker_tick) {
+#define NREPS 2
+#define NTICKS 3
+ ticker_t ticker;
+ int32_t i, j;
+
+ ticker_init(&ticker, NTICKS);
+ for (i = 0; i < NREPS; i++) {
+ for (j = 0; j < NTICKS; j++) {
+ expect_u_eq(ticker_read(&ticker), NTICKS - j,
+ "Unexpected ticker value (i=%d, j=%d)", i, j);
+ expect_false(ticker_tick(&ticker),
+ "Unexpected ticker fire (i=%d, j=%d)", i, j);
+ }
+ expect_u32_eq(ticker_read(&ticker), 0,
+ "Expected ticker depletion");
+ expect_true(ticker_tick(&ticker),
+ "Expected ticker fire (i=%d)", i);
+ expect_u32_eq(ticker_read(&ticker), NTICKS,
+ "Expected ticker reset");
+ }
+#undef NREPS
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_ticks) {
+#define NTICKS 3
+ ticker_t ticker;
+
+ ticker_init(&ticker, NTICKS);
+
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+ expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
+ expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
+ expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+
+ expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_copy) {
+#define NTICKS 3
+ ticker_t ta, tb;
+
+ ticker_init(&ta, NTICKS);
+ ticker_copy(&tb, &ta);
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+ expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+
+ ticker_tick(&ta);
+ ticker_copy(&tb, &ta);
+ expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
+ expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+TEST_BEGIN(test_ticker_geom) {
+ const int32_t ticks = 100;
+ const uint64_t niters = 100 * 1000;
+
+ ticker_geom_t ticker;
+ ticker_geom_init(&ticker, ticks);
+ uint64_t total_ticks = 0;
+ /* Just some random constant. */
+ uint64_t prng_state = 0x343219f93496db9fULL;
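+	/* Sum the ticks observed before each fire over many trials. */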
+ for (uint64_t i = 0; i < niters; i++) {
+		while (!ticker_geom_tick(&ticker, &prng_state)) {
+ total_ticks++;
+ }
+ }
+ /*
+ * In fact, with this choice of random seed and the PRNG implementation
+ * used at the time this was tested, total_ticks is 95.1% of the
+ * expected ticks.
+ */
+	expect_u64_ge(total_ticks, niters * ticks * 9 / 10,
+	    "Mean off by > 10%%");
+	expect_u64_le(total_ticks, niters * ticks * 11 / 10,
+	    "Mean off by > 10%%");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ticker_tick,
+ test_ticker_ticks,
+ test_ticker_copy,
+ test_ticker_geom);
+}
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
new file mode 100644
index 0000000..205d870
--- /dev/null
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -0,0 +1,274 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
+ * be asserting that we're on one.
+ */
+static bool originally_fast;
+static int data_cleanup_count;
+
+void
+data_cleanup(int *data) {
+ if (data_cleanup_count == 0) {
+ expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
+ "Argument passed into cleanup function should match tsd "
+ "value");
+ }
+ ++data_cleanup_count;
+
+ /*
+	 * Allocate during cleanup for two rounds, in order to ensure that
+ * jemalloc's internal tsd reinitialization happens.
+ */
+ bool reincarnate = false;
+ switch (*data) {
+ case MALLOC_TSD_TEST_DATA_INIT:
+ *data = 1;
+ reincarnate = true;
+ break;
+ case 1:
+ *data = 2;
+ reincarnate = true;
+ break;
+ case 2:
+ return;
+ default:
+ not_reached();
+ }
+
+ if (reincarnate) {
+ void *p = mallocx(1, 0);
+		expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ }
+}
+
+static void *
+thd_start(void *arg) {
+ int d = (int)(uintptr_t)arg;
+ void *p;
+
+ /*
+ * Test free before tsd init -- the free fast path (which does not
+ * explicitly check for NULL) has to tolerate this case, and fall back
+ * to free_default.
+ */
+ free(NULL);
+
+ tsd_t *tsd = tsd_fetch();
+ expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
+ "Initial tsd get should return initialization value");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+
+ tsd_test_data_set(tsd, d);
+ expect_x_eq(tsd_test_data_get(tsd), d,
+ "After tsd set, tsd get should return value that was set");
+
+ d = 0;
+ expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
+ "Resetting local data should have no effect on tsd");
+
+ tsd_test_callback_set(tsd, &data_cleanup);
+
+ free(p);
+ return NULL;
+}
+
+TEST_BEGIN(test_tsd_main_thread) {
+ thd_start((void *)(uintptr_t)0xa5f3e329);
+}
+TEST_END
+
+TEST_BEGIN(test_tsd_sub_thread) {
+ thd_t thd;
+
+ data_cleanup_count = 0;
+ thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
+ thd_join(thd, NULL);
+ /*
+ * We reincarnate twice in the data cleanup, so it should execute at
+ * least 3 times.
+ */
+ expect_x_ge(data_cleanup_count, 3,
+ "Cleanup function should have executed multiple times.");
+}
+TEST_END
+
+static void *
+thd_start_reincarnated(void *arg) {
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd);
+
+ void *p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+
+ /* Manually trigger reincarnation. */
+ expect_ptr_not_null(tsd_arena_get(tsd),
+ "Should have tsd arena set.");
+ tsd_cleanup((void *)tsd);
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ "TSD arena should have been cleared.");
+ expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
+ "TSD state should be purgatory\n");
+
+ free(p);
+ expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
+ "TSD state should be reincarnated\n");
+ p = mallocx(1, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ "Should not have tsd arena set after reincarnation.");
+
+ free(p);
+ tsd_cleanup((void *)tsd);
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ "TSD arena should have been cleared after 2nd cleanup.");
+
+ return NULL;
+}
+
+TEST_BEGIN(test_tsd_reincarnation) {
+ thd_t thd;
+ thd_create(&thd, thd_start_reincarnated, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+typedef struct {
+ atomic_u32_t phase;
+ atomic_b_t error;
+} global_slow_data_t;
+
+static void *
+thd_start_global_slow(void *arg) {
+ /* PHASE 0 */
+ global_slow_data_t *data = (global_slow_data_t *)arg;
+ free(mallocx(1, 0));
+
+ tsd_t *tsd = tsd_fetch();
+ /*
+ * No global slowness has happened yet; there was an error if we were
+ * originally fast but aren't now.
+ */
+ atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+ ATOMIC_SEQ_CST);
+ atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
+
+ /* PHASE 2 */
+ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
+ }
+ free(mallocx(1, 0));
+ atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+ atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
+
+ /* PHASE 4 */
+ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
+ }
+ free(mallocx(1, 0));
+ atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+ atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
+
+ /* PHASE 6 */
+ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
+ }
+ free(mallocx(1, 0));
+ /* Only one decrement so far. */
+ atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+ atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
+
+ /* PHASE 8 */
+ while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
+ }
+ free(mallocx(1, 0));
+ /*
+ * Both decrements happened; we should be fast again (if we ever
+	 * were).
+ */
+ atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+ ATOMIC_SEQ_CST);
+ atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
+
+ return NULL;
+}
+
+TEST_BEGIN(test_tsd_global_slow) {
+ global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
+ /*
+ * Note that the "mallocx" here (vs. malloc) is important, since the
+ * compiler is allowed to optimize away free(malloc(1)) but not
+ * free(mallocx(1)).
+ */
+ free(mallocx(1, 0));
+ tsd_t *tsd = tsd_fetch();
+ originally_fast = tsd_fast(tsd);
+
+ thd_t thd;
+	thd_create(&thd, thd_start_global_slow, (void *)&data);
+ /* PHASE 1 */
+ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
+ /*
+ * We don't have a portable condvar/semaphore mechanism.
+ * Spin-wait.
+ */
+ }
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ tsd_global_slow_inc(tsd_tsdn(tsd));
+ free(mallocx(1, 0));
+ expect_false(tsd_fast(tsd), "");
+ atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
+
+ /* PHASE 3 */
+ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
+ }
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ /* Increase again, so that we can test multiple fast/slow changes. */
+ tsd_global_slow_inc(tsd_tsdn(tsd));
+ atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
+ free(mallocx(1, 0));
+ expect_false(tsd_fast(tsd), "");
+
+ /* PHASE 5 */
+ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
+ }
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ tsd_global_slow_dec(tsd_tsdn(tsd));
+ atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
+ /* We only decreased once; things should still be slow. */
+ free(mallocx(1, 0));
+ expect_false(tsd_fast(tsd), "");
+
+ /* PHASE 7 */
+ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
+ }
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ tsd_global_slow_dec(tsd_tsdn(tsd));
+ atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
+ /* We incremented and then decremented twice; we should be fast now. */
+ free(mallocx(1, 0));
+ expect_true(!originally_fast || tsd_fast(tsd), "");
+
+ /* PHASE 9 */
+ while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
+ }
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Ensure tsd bootstrapped. */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return test_status_fail;
+ }
+
+ return test_no_reentrancy(
+ test_tsd_main_thread,
+ test_tsd_sub_thread,
+ test_tsd_reincarnation,
+ test_tsd_global_slow);
+}
diff --git a/deps/jemalloc/test/unit/uaf.c b/deps/jemalloc/test/unit/uaf.c
new file mode 100644
index 0000000..a8433c2
--- /dev/null
+++ b/deps/jemalloc/test/unit/uaf.c
@@ -0,0 +1,262 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/cache_bin.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/safety_check.h"
+
+const char *malloc_conf = TEST_SAN_UAF_ALIGN_ENABLE;
+
+static size_t san_uaf_align;
+
+static bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+static void
+test_write_after_free_pre(void) {
+ safety_check_set_abort(&fake_abort);
+ fake_abort_called = false;
+}
+
+static void
+test_write_after_free_post(void) {
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
+ expect_true(fake_abort_called, "Use-after-free check didn't fire.");
+ safety_check_set_abort(NULL);
+}
+
+static bool
+uaf_detection_enabled(void) {
+ if (!config_uaf_detection || !san_uaf_detection_enabled()) {
+ return false;
+ }
+
+ ssize_t lg_san_uaf_align;
+ size_t sz = sizeof(lg_san_uaf_align);
+ assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ if (lg_san_uaf_align < 0) {
+ return false;
+ }
+ assert_zd_ge(lg_san_uaf_align, LG_PAGE, "san_uaf_align out of range");
+ san_uaf_align = (size_t)1 << lg_san_uaf_align;
+
+ bool tcache_enabled;
+ sz = sizeof(tcache_enabled);
+ assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ if (!tcache_enabled) {
+ return false;
+ }
+
+ return true;
+}
+
+static size_t
+read_tcache_stashed_bytes(unsigned arena_ind) {
+ if (!config_stats) {
+ return 0;
+ }
+
+ uint64_t epoch;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ size_t tcache_stashed_bytes;
+ size_t sz = sizeof(tcache_stashed_bytes);
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ return tcache_stashed_bytes;
+}
+
+static void
+test_use_after_free(size_t alloc_size, bool write_after_free) {
+ void *ptr = (void *)(uintptr_t)san_uaf_align;
+ assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+ ptr = (void *)((uintptr_t)123 * (uintptr_t)san_uaf_align);
+ assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+ ptr = (void *)((uintptr_t)san_uaf_align + 1);
+ assert_false(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+
+ /*
+ * Disable purging (-1) so that all dirty pages remain committed, to
+ * make use-after-free tolerable.
+ */
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ size_t n_max = san_uaf_align * 2;
+ void **items = mallocx(n_max * sizeof(void *), flags);
+ assert_ptr_not_null(items, "Unexpected mallocx failure");
+
+ bool found = false;
+ size_t iter = 0;
+ char magic = 's';
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
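+	/* Allocate until a pointer lands on the UAF-checked alignment. */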
+ while (!found) {
+ ptr = mallocx(alloc_size, flags);
+ assert_ptr_not_null(ptr, "Unexpected mallocx failure");
+
+ found = cache_bin_nonfast_aligned(ptr);
+ *(char *)ptr = magic;
+ items[iter] = ptr;
+ assert_zu_lt(iter++, n_max, "No aligned ptr found");
+ }
+
+ if (write_after_free) {
+ test_write_after_free_pre();
+ }
+ bool junked = false;
+ while (iter-- != 0) {
+ char *volatile mem = items[iter];
+ assert_c_eq(*mem, magic, "Unexpected memory content");
+ size_t stashed_before = read_tcache_stashed_bytes(arena_ind);
+ free(mem);
+ if (*mem != magic) {
+ junked = true;
+ assert_c_eq(*mem, (char)uaf_detect_junk,
+ "Unexpected junk-filling bytes");
+ if (write_after_free) {
+ *(char *)mem = magic + 1;
+ }
+
+ size_t stashed_after = read_tcache_stashed_bytes(
+ arena_ind);
+ /*
+ * An edge case is the deallocation above triggering the
+ * tcache GC event, in which case the stashed pointers
+ * may get flushed immediately, before returning from
+ * free(). Treat these cases as checked already.
+ */
+ if (stashed_after <= stashed_before) {
+ fake_abort_called = true;
+ }
+ }
+ /* Flush tcache (including stashed). */
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
+ }
+ expect_true(junked, "Aligned ptr not junked");
+ if (write_after_free) {
+ test_write_after_free_post();
+ }
+
+ dallocx(items, flags);
+ do_arena_destroy(arena_ind);
+}
+
+TEST_BEGIN(test_read_after_free) {
+ test_skip_if(!uaf_detection_enabled());
+
+ test_use_after_free(sizeof(void *), /* write_after_free */ false);
+ test_use_after_free(sizeof(void *) + 1, /* write_after_free */ false);
+ test_use_after_free(16, /* write_after_free */ false);
+ test_use_after_free(20, /* write_after_free */ false);
+ test_use_after_free(32, /* write_after_free */ false);
+ test_use_after_free(33, /* write_after_free */ false);
+ test_use_after_free(48, /* write_after_free */ false);
+ test_use_after_free(64, /* write_after_free */ false);
+ test_use_after_free(65, /* write_after_free */ false);
+ test_use_after_free(129, /* write_after_free */ false);
+ test_use_after_free(255, /* write_after_free */ false);
+ test_use_after_free(256, /* write_after_free */ false);
+}
+TEST_END
+
+TEST_BEGIN(test_write_after_free) {
+ test_skip_if(!uaf_detection_enabled());
+
+ test_use_after_free(sizeof(void *), /* write_after_free */ true);
+ test_use_after_free(sizeof(void *) + 1, /* write_after_free */ true);
+ test_use_after_free(16, /* write_after_free */ true);
+ test_use_after_free(20, /* write_after_free */ true);
+ test_use_after_free(32, /* write_after_free */ true);
+ test_use_after_free(33, /* write_after_free */ true);
+ test_use_after_free(48, /* write_after_free */ true);
+ test_use_after_free(64, /* write_after_free */ true);
+ test_use_after_free(65, /* write_after_free */ true);
+ test_use_after_free(129, /* write_after_free */ true);
+ test_use_after_free(255, /* write_after_free */ true);
+ test_use_after_free(256, /* write_after_free */ true);
+}
+TEST_END
+
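+/* Return whether every stored link still points at one of the allocations. */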
+static bool
+check_allocated_intact(void **allocated, size_t n_alloc) {
+ for (unsigned i = 0; i < n_alloc; i++) {
+ void *ptr = *(void **)allocated[i];
+ bool found = false;
+ for (unsigned j = 0; j < n_alloc; j++) {
+ if (ptr == allocated[j]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+TEST_BEGIN(test_use_after_free_integration) {
+ test_skip_if(!uaf_detection_enabled());
+
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind);
+
+ size_t n_alloc = san_uaf_align * 2;
+ void **allocated = mallocx(n_alloc * sizeof(void *), flags);
+ assert_ptr_not_null(allocated, "Unexpected mallocx failure");
+
+ for (unsigned i = 0; i < n_alloc; i++) {
+ allocated[i] = mallocx(sizeof(void *) * 8, flags);
+ assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
+ if (i > 0) {
+ /* Emulate a circular list. */
+ *(void **)allocated[i] = allocated[i - 1];
+ }
+ }
+ *(void **)allocated[0] = allocated[n_alloc - 1];
+ expect_true(check_allocated_intact(allocated, n_alloc),
+ "Allocated data corrupted");
+
+ for (unsigned i = 0; i < n_alloc; i++) {
+ free(allocated[i]);
+ }
+ /* Read-after-free */
+ expect_false(check_allocated_intact(allocated, n_alloc),
+ "Junk-filling not detected");
+
+ test_write_after_free_pre();
+ for (unsigned i = 0; i < n_alloc; i++) {
+ allocated[i] = mallocx(sizeof(void *), flags);
+ assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
+ *(void **)allocated[i] = (void *)(uintptr_t)i;
+ }
+ /* Write-after-free */
+ for (unsigned i = 0; i < n_alloc; i++) {
+ free(allocated[i]);
+ *(void **)allocated[i] = NULL;
+ }
+ test_write_after_free_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_read_after_free,
+ test_write_after_free,
+ test_use_after_free_integration);
+}
diff --git a/deps/jemalloc/test/unit/witness.c b/deps/jemalloc/test/unit/witness.c
new file mode 100644
index 0000000..5a6c448
--- /dev/null
+++ b/deps/jemalloc/test/unit/witness.c
@@ -0,0 +1,280 @@
+#include "test/jemalloc_test.h"
+
+static witness_lock_error_t *witness_lock_error_orig;
+static witness_owner_error_t *witness_owner_error_orig;
+static witness_not_owner_error_t *witness_not_owner_error_orig;
+static witness_depth_error_t *witness_depth_error_orig;
+
+static bool saw_lock_error;
+static bool saw_owner_error;
+static bool saw_not_owner_error;
+static bool saw_depth_error;
+
+static void
+witness_lock_error_intercept(const witness_list_t *witnesses,
+ const witness_t *witness) {
+ saw_lock_error = true;
+}
+
+static void
+witness_owner_error_intercept(const witness_t *witness) {
+ saw_owner_error = true;
+}
+
+static void
+witness_not_owner_error_intercept(const witness_t *witness) {
+ saw_not_owner_error = true;
+}
+
+static void
+witness_depth_error_intercept(const witness_list_t *witnesses,
+ witness_rank_t rank_inclusive, unsigned depth) {
+ saw_depth_error = true;
+}
+
+static int
+witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
+ expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+ assert(oa == (void *)a);
+ assert(ob == (void *)b);
+
+ return strcmp(a->name, b->name);
+}
+
+static int
+witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
+ void *ob) {
+ expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+ assert(oa == (void *)a);
+ assert(ob == (void *)b);
+
+ return -strcmp(a->name, b->name);
+}
+
+TEST_BEGIN(test_witness) {
+ witness_t a, b;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
+
+ witness_init(&a, "a", 1, NULL, NULL);
+ witness_assert_not_owner(&witness_tsdn, &a);
+ witness_lock(&witness_tsdn, &a);
+ witness_assert_owner(&witness_tsdn, &a);
+ witness_assert_depth(&witness_tsdn, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0);
+
+ witness_init(&b, "b", 2, NULL, NULL);
+ witness_assert_not_owner(&witness_tsdn, &b);
+ witness_lock(&witness_tsdn, &b);
+ witness_assert_owner(&witness_tsdn, &b);
+ witness_assert_depth(&witness_tsdn, 2);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+
+ witness_unlock(&witness_tsdn, &a);
+ witness_assert_depth(&witness_tsdn, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+ witness_unlock(&witness_tsdn, &b);
+
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+ witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_witness_comp) {
+ witness_t a, b, c, d;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_init(&a, "a", 1, witness_comp, &a);
+ witness_assert_not_owner(&witness_tsdn, &a);
+ witness_lock(&witness_tsdn, &a);
+ witness_assert_owner(&witness_tsdn, &a);
+ witness_assert_depth(&witness_tsdn, 1);
+
+ witness_init(&b, "b", 1, witness_comp, &b);
+ witness_assert_not_owner(&witness_tsdn, &b);
+ witness_lock(&witness_tsdn, &b);
+ witness_assert_owner(&witness_tsdn, &b);
+ witness_assert_depth(&witness_tsdn, 2);
+ witness_unlock(&witness_tsdn, &b);
+ witness_assert_depth(&witness_tsdn, 1);
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ witness_init(&c, "c", 1, witness_comp_reverse, &c);
+ witness_assert_not_owner(&witness_tsdn, &c);
+ expect_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(&witness_tsdn, &c);
+ expect_true(saw_lock_error, "Expected witness lock error");
+ witness_unlock(&witness_tsdn, &c);
+ witness_assert_depth(&witness_tsdn, 1);
+
+ saw_lock_error = false;
+
+ witness_init(&d, "d", 1, NULL, NULL);
+ witness_assert_not_owner(&witness_tsdn, &d);
+ expect_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(&witness_tsdn, &d);
+ expect_true(saw_lock_error, "Expected witness lock error");
+ witness_unlock(&witness_tsdn, &d);
+ witness_assert_depth(&witness_tsdn, 1);
+
+ witness_unlock(&witness_tsdn, &a);
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_reversal) {
+ witness_t a, b;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_init(&a, "a", 1, NULL, NULL);
+ witness_init(&b, "b", 2, NULL, NULL);
+
+ witness_lock(&witness_tsdn, &b);
+ witness_assert_depth(&witness_tsdn, 1);
+ expect_false(saw_lock_error, "Unexpected witness lock error");
+ witness_lock(&witness_tsdn, &a);
+ expect_true(saw_lock_error, "Expected witness lock error");
+
+ witness_unlock(&witness_tsdn, &a);
+ witness_assert_depth(&witness_tsdn, 1);
+ witness_unlock(&witness_tsdn, &b);
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_recursive) {
+ witness_t a;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_not_owner_error_orig = witness_not_owner_error;
+ witness_not_owner_error = witness_not_owner_error_intercept;
+ saw_not_owner_error = false;
+
+ witness_lock_error_orig = witness_lock_error;
+ witness_lock_error = witness_lock_error_intercept;
+ saw_lock_error = false;
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_init(&a, "a", 1, NULL, NULL);
+
+ witness_lock(&witness_tsdn, &a);
+ expect_false(saw_lock_error, "Unexpected witness lock error");
+ expect_false(saw_not_owner_error, "Unexpected witness not owner error");
+ witness_lock(&witness_tsdn, &a);
+ expect_true(saw_lock_error, "Expected witness lock error");
+ expect_true(saw_not_owner_error, "Expected witness not owner error");
+
+ witness_unlock(&witness_tsdn, &a);
+
+ witness_assert_lockless(&witness_tsdn);
+
+	witness_not_owner_error = witness_not_owner_error_orig;
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_unlock_not_owned) {
+ witness_t a;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_owner_error_orig = witness_owner_error;
+ witness_owner_error = witness_owner_error_intercept;
+ saw_owner_error = false;
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_init(&a, "a", 1, NULL, NULL);
+
+ expect_false(saw_owner_error, "Unexpected owner error");
+ witness_unlock(&witness_tsdn, &a);
+ expect_true(saw_owner_error, "Expected owner error");
+
+ witness_assert_lockless(&witness_tsdn);
+
+ witness_owner_error = witness_owner_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_depth) {
+ witness_t a;
+ witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+ test_skip_if(!config_debug);
+
+ witness_depth_error_orig = witness_depth_error;
+ witness_depth_error = witness_depth_error_intercept;
+ saw_depth_error = false;
+
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+
+ witness_init(&a, "a", 1, NULL, NULL);
+
+ expect_false(saw_depth_error, "Unexpected depth error");
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+
+ witness_lock(&witness_tsdn, &a);
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+ expect_true(saw_depth_error, "Expected depth error");
+
+ witness_unlock(&witness_tsdn, &a);
+
+ witness_assert_lockless(&witness_tsdn);
+ witness_assert_depth(&witness_tsdn, 0);
+
+ witness_depth_error = witness_depth_error_orig;
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_witness,
+ test_witness_comp,
+ test_witness_reversal,
+ test_witness_recursive,
+ test_witness_unlock_not_owned,
+ test_witness_depth);
+}
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
new file mode 100644
index 0000000..d3e81f1
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+static void
+test_zero(size_t sz_min, size_t sz_max) {
+ uint8_t *s;
+ size_t sz_prev, sz, i;
+#define MAGIC ((uint8_t)0x61)
+
+ sz_prev = 0;
+ s = (uint8_t *)mallocx(sz_min, 0);
+ expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ expect_u_eq(s[0], MAGIC,
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ expect_u_eq(s[sz_prev-1], MAGIC,
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ expect_u_eq(s[i], 0x0,
+ "Newly allocated byte %zu/%zu isn't zero-filled",
+ i, sz);
+ s[i] = MAGIC;
+ }
+
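+		/* Try in-place expansion; if it fails, move via rallocx(). */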
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ s = (uint8_t *)rallocx(s, sz+1, 0);
+ expect_ptr_not_null((void *)s,
+ "Unexpected rallocx() failure");
+ }
+ }
+
+ dallocx(s, 0);
+#undef MAGIC
+}
+
+TEST_BEGIN(test_zero_small) {
+ test_skip_if(!config_fill);
+ test_zero(1, SC_SMALL_MAXCLASS - 1);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_large) {
+ test_skip_if(!config_fill);
+ test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_zero_small,
+ test_zero_large);
+}
diff --git a/deps/jemalloc/test/unit/zero.sh b/deps/jemalloc/test/unit/zero.sh
new file mode 100644
index 0000000..b4540b2
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,junk:false,zero:true"
+fi
diff --git a/deps/jemalloc/test/unit/zero_realloc_abort.c b/deps/jemalloc/test/unit/zero_realloc_abort.c
new file mode 100644
index 0000000..a880d10
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_abort.c
@@ -0,0 +1,26 @@
+#include "test/jemalloc_test.h"
+
+#include <signal.h>
+
+static bool abort_called = false;
+
+void set_abort_called() {
+ abort_called = true;
+}
+
+TEST_BEGIN(test_realloc_abort) {
+ abort_called = false;
+ safety_check_set_abort(&set_abort_called);
+ void *ptr = mallocx(42, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ ptr = realloc(ptr, 0);
+ expect_true(abort_called, "Realloc with zero size didn't abort");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_realloc_abort);
+}
+
diff --git a/deps/jemalloc/test/unit/zero_realloc_abort.sh b/deps/jemalloc/test/unit/zero_realloc_abort.sh
new file mode 100644
index 0000000..37daeea
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_abort.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:abort"
diff --git a/deps/jemalloc/test/unit/zero_realloc_alloc.c b/deps/jemalloc/test/unit/zero_realloc_alloc.c
new file mode 100644
index 0000000..65e07bd
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_alloc.c
@@ -0,0 +1,48 @@
+#include "test/jemalloc_test.h"
+
+static uint64_t
+allocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t allocated;
+ size_t sz = sizeof(allocated);
+ expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ return allocated;
+}
+
+static uint64_t
+deallocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t deallocated;
+ size_t sz = sizeof(deallocated);
+ expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return deallocated;
+}
+
+TEST_BEGIN(test_realloc_alloc) {
+ void *ptr = mallocx(1, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ uint64_t allocated_before = allocated();
+ uint64_t deallocated_before = deallocated();
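+	/*
+	 * Under zero_realloc:alloc, realloc(ptr, 0) behaves like a free
+	 * followed by a minimal allocation, so both counters should move up.
+	 */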
+ ptr = realloc(ptr, 0);
+ uint64_t allocated_after = allocated();
+ uint64_t deallocated_after = deallocated();
+ if (config_stats) {
+ expect_u64_lt(allocated_before, allocated_after,
+ "Unexpected stats change");
+ expect_u64_lt(deallocated_before, deallocated_after,
+ "Unexpected stats change");
+ }
+ dallocx(ptr, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_realloc_alloc);
+}
diff --git a/deps/jemalloc/test/unit/zero_realloc_alloc.sh b/deps/jemalloc/test/unit/zero_realloc_alloc.sh
new file mode 100644
index 0000000..802687c
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_alloc.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:alloc"
diff --git a/deps/jemalloc/test/unit/zero_realloc_free.c b/deps/jemalloc/test/unit/zero_realloc_free.c
new file mode 100644
index 0000000..baed86c
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_free.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+
+static uint64_t
+deallocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t deallocated;
+ size_t sz = sizeof(deallocated);
+ expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return deallocated;
+}
+
+TEST_BEGIN(test_realloc_free) {
+ void *ptr = mallocx(42, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ uint64_t deallocated_before = deallocated();
+ ptr = realloc(ptr, 0);
+ uint64_t deallocated_after = deallocated();
+ expect_ptr_null(ptr, "Realloc didn't free");
+ if (config_stats) {
+ expect_u64_gt(deallocated_after, deallocated_before,
+ "Realloc didn't free");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_realloc_free);
+}
diff --git a/deps/jemalloc/test/unit/zero_realloc_free.sh b/deps/jemalloc/test/unit/zero_realloc_free.sh
new file mode 100644
index 0000000..51b01c9
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_free.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:free"
diff --git a/deps/jemalloc/test/unit/zero_reallocs.c b/deps/jemalloc/test/unit/zero_reallocs.c
new file mode 100644
index 0000000..66c7a40
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_reallocs.c
@@ -0,0 +1,40 @@
+#include "test/jemalloc_test.h"
+
+static size_t
+zero_reallocs() {
+ if (!config_stats) {
+ return 0;
+ }
+ size_t count = 12345;
+ size_t sz = sizeof(count);
+
+ expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return count;
+}
+
+TEST_BEGIN(test_zero_reallocs) {
+ test_skip_if(!config_stats);
+
+ for (size_t i = 0; i < 100; ++i) {
+ void *ptr = mallocx(i * i + 1, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ size_t count = zero_reallocs();
+ expect_zu_eq(i, count, "Incorrect zero realloc count");
+ ptr = realloc(ptr, 0);
+ expect_ptr_null(ptr, "Realloc didn't free");
+ count = zero_reallocs();
+ expect_zu_eq(i + 1, count, "Realloc didn't adjust count");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * We expect explicit counts; reentrant tests run multiple times, so
+ * counts leak across runs.
+ */
+ return test_no_reentrancy(
+ test_zero_reallocs);
+}
diff --git a/deps/jemalloc/test/unit/zero_reallocs.sh b/deps/jemalloc/test/unit/zero_reallocs.sh
new file mode 100644
index 0000000..51b01c9
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_reallocs.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:free"