summaryrefslogtreecommitdiffstats
path: root/deps/jemalloc/test
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 17:31:02 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 17:31:02 +0000
commitbb12c1fd00eb51118749bbbc69c5596835fcbd3b (patch)
tree88038a98bd31c1b765f3390767a2ec12e37c79ec /deps/jemalloc/test
parentInitial commit. (diff)
downloadredis-upstream.tar.xz
redis-upstream.zip
Adding upstream version 5:7.0.15.upstream/5%7.0.15upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'deps/jemalloc/test')
-rw-r--r--deps/jemalloc/test/include/test/SFMT-alti.h186
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params.h132
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params11213.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params1279.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params132049.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params19937.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params216091.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params2281.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params4253.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params44497.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params607.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-params86243.h81
-rw-r--r--deps/jemalloc/test/include/test/SFMT-sse2.h157
-rw-r--r--deps/jemalloc/test/include/test/SFMT.h146
-rw-r--r--deps/jemalloc/test/include/test/btalloc.h30
-rw-r--r--deps/jemalloc/test/include/test/extent_hooks.h289
-rw-r--r--deps/jemalloc/test/include/test/jemalloc_test.h.in173
-rw-r--r--deps/jemalloc/test/include/test/jemalloc_test_defs.h.in9
-rw-r--r--deps/jemalloc/test/include/test/math.h306
-rw-r--r--deps/jemalloc/test/include/test/mq.h107
-rw-r--r--deps/jemalloc/test/include/test/mtx.h21
-rw-r--r--deps/jemalloc/test/include/test/test.h338
-rw-r--r--deps/jemalloc/test/include/test/thd.h9
-rw-r--r--deps/jemalloc/test/include/test/timer.h11
-rw-r--r--deps/jemalloc/test/integration/MALLOCX_ARENA.c66
-rw-r--r--deps/jemalloc/test/integration/aligned_alloc.c157
-rw-r--r--deps/jemalloc/test/integration/allocated.c124
-rw-r--r--deps/jemalloc/test/integration/cpp/basic.cpp25
-rw-r--r--deps/jemalloc/test/integration/extent.c248
-rw-r--r--deps/jemalloc/test/integration/extent.sh5
-rw-r--r--deps/jemalloc/test/integration/malloc.c16
-rw-r--r--deps/jemalloc/test/integration/mallocx.c274
-rw-r--r--deps/jemalloc/test/integration/mallocx.sh5
-rw-r--r--deps/jemalloc/test/integration/overflow.c59
-rw-r--r--deps/jemalloc/test/integration/posix_memalign.c128
-rw-r--r--deps/jemalloc/test/integration/rallocx.c258
-rw-r--r--deps/jemalloc/test/integration/sdallocx.c55
-rw-r--r--deps/jemalloc/test/integration/slab_sizes.c80
-rw-r--r--deps/jemalloc/test/integration/slab_sizes.sh4
-rw-r--r--deps/jemalloc/test/integration/smallocx.c312
-rw-r--r--deps/jemalloc/test/integration/smallocx.sh5
-rw-r--r--deps/jemalloc/test/integration/thread_arena.c86
-rw-r--r--deps/jemalloc/test/integration/thread_tcache_enabled.c87
-rw-r--r--deps/jemalloc/test/integration/xallocx.c384
-rw-r--r--deps/jemalloc/test/integration/xallocx.sh5
-rw-r--r--deps/jemalloc/test/src/SFMT.c719
-rw-r--r--deps/jemalloc/test/src/btalloc.c6
-rw-r--r--deps/jemalloc/test/src/btalloc_0.c3
-rw-r--r--deps/jemalloc/test/src/btalloc_1.c3
-rw-r--r--deps/jemalloc/test/src/math.c2
-rw-r--r--deps/jemalloc/test/src/mq.c27
-rw-r--r--deps/jemalloc/test/src/mtx.c61
-rw-r--r--deps/jemalloc/test/src/test.c234
-rw-r--r--deps/jemalloc/test/src/thd.c34
-rw-r--r--deps/jemalloc/test/src/timer.c56
-rw-r--r--deps/jemalloc/test/stress/hookbench.c73
-rw-r--r--deps/jemalloc/test/stress/microbench.c165
-rw-r--r--deps/jemalloc/test/test.sh.in80
-rw-r--r--deps/jemalloc/test/unit/SFMT.c1599
-rw-r--r--deps/jemalloc/test/unit/a0.c16
-rw-r--r--deps/jemalloc/test/unit/arena_reset.c349
-rw-r--r--deps/jemalloc/test/unit/arena_reset_prof.c4
-rw-r--r--deps/jemalloc/test/unit/arena_reset_prof.sh3
-rw-r--r--deps/jemalloc/test/unit/atomic.c229
-rw-r--r--deps/jemalloc/test/unit/background_thread.c119
-rw-r--r--deps/jemalloc/test/unit/background_thread_enable.c85
-rw-r--r--deps/jemalloc/test/unit/base.c234
-rw-r--r--deps/jemalloc/test/unit/binshard.c154
-rw-r--r--deps/jemalloc/test/unit/binshard.sh3
-rw-r--r--deps/jemalloc/test/unit/bit_util.c111
-rw-r--r--deps/jemalloc/test/unit/bitmap.c431
-rw-r--r--deps/jemalloc/test/unit/ckh.c211
-rw-r--r--deps/jemalloc/test/unit/decay.c605
-rw-r--r--deps/jemalloc/test/unit/decay.sh3
-rw-r--r--deps/jemalloc/test/unit/div.c29
-rw-r--r--deps/jemalloc/test/unit/emitter.c469
-rw-r--r--deps/jemalloc/test/unit/extent_quantize.c141
-rw-r--r--deps/jemalloc/test/unit/extent_util.c269
-rw-r--r--deps/jemalloc/test/unit/fork.c141
-rw-r--r--deps/jemalloc/test/unit/hash.c173
-rw-r--r--deps/jemalloc/test/unit/hook.c580
-rw-r--r--deps/jemalloc/test/unit/huge.c108
-rw-r--r--deps/jemalloc/test/unit/junk.c141
-rw-r--r--deps/jemalloc/test/unit/junk.sh5
-rw-r--r--deps/jemalloc/test/unit/junk_alloc.c1
-rw-r--r--deps/jemalloc/test/unit/junk_alloc.sh5
-rw-r--r--deps/jemalloc/test/unit/junk_free.c1
-rw-r--r--deps/jemalloc/test/unit/junk_free.sh5
-rw-r--r--deps/jemalloc/test/unit/log.c193
-rw-r--r--deps/jemalloc/test/unit/mallctl.c888
-rw-r--r--deps/jemalloc/test/unit/malloc_io.c258
-rw-r--r--deps/jemalloc/test/unit/math.c390
-rw-r--r--deps/jemalloc/test/unit/mq.c89
-rw-r--r--deps/jemalloc/test/unit/mtx.c57
-rw-r--r--deps/jemalloc/test/unit/nstime.c249
-rw-r--r--deps/jemalloc/test/unit/pack.c166
-rw-r--r--deps/jemalloc/test/unit/pack.sh4
-rw-r--r--deps/jemalloc/test/unit/pages.c29
-rw-r--r--deps/jemalloc/test/unit/ph.c318
-rw-r--r--deps/jemalloc/test/unit/prng.c237
-rw-r--r--deps/jemalloc/test/unit/prof_accum.c81
-rw-r--r--deps/jemalloc/test/unit/prof_accum.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_active.c117
-rw-r--r--deps/jemalloc/test/unit/prof_active.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_gdump.c74
-rw-r--r--deps/jemalloc/test/unit/prof_gdump.sh6
-rw-r--r--deps/jemalloc/test/unit/prof_idump.c42
-rw-r--r--deps/jemalloc/test/unit/prof_idump.sh8
-rw-r--r--deps/jemalloc/test/unit/prof_log.c148
-rw-r--r--deps/jemalloc/test/unit/prof_log.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_reset.c286
-rw-r--r--deps/jemalloc/test/unit/prof_reset.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_tctx.c46
-rw-r--r--deps/jemalloc/test/unit/prof_tctx.sh5
-rw-r--r--deps/jemalloc/test/unit/prof_thread_name.c120
-rw-r--r--deps/jemalloc/test/unit/prof_thread_name.sh5
-rw-r--r--deps/jemalloc/test/unit/ql.c204
-rw-r--r--deps/jemalloc/test/unit/qr.c243
-rw-r--r--deps/jemalloc/test/unit/rb.c355
-rw-r--r--deps/jemalloc/test/unit/retained.c184
-rw-r--r--deps/jemalloc/test/unit/rtree.c228
-rw-r--r--deps/jemalloc/test/unit/safety_check.c156
-rw-r--r--deps/jemalloc/test/unit/safety_check.sh5
-rw-r--r--deps/jemalloc/test/unit/sc.c33
-rw-r--r--deps/jemalloc/test/unit/seq.c95
-rw-r--r--deps/jemalloc/test/unit/size_classes.c188
-rw-r--r--deps/jemalloc/test/unit/slab.c33
-rw-r--r--deps/jemalloc/test/unit/smoothstep.c102
-rw-r--r--deps/jemalloc/test/unit/spin.c18
-rw-r--r--deps/jemalloc/test/unit/stats.c374
-rw-r--r--deps/jemalloc/test/unit/stats_print.c999
-rw-r--r--deps/jemalloc/test/unit/test_hooks.c38
-rw-r--r--deps/jemalloc/test/unit/ticker.c73
-rw-r--r--deps/jemalloc/test/unit/tsd.c267
-rw-r--r--deps/jemalloc/test/unit/witness.c280
-rw-r--r--deps/jemalloc/test/unit/zero.c59
-rw-r--r--deps/jemalloc/test/unit/zero.sh5
137 files changed, 20569 insertions, 0 deletions
diff --git a/deps/jemalloc/test/include/test/SFMT-alti.h b/deps/jemalloc/test/include/test/SFMT-alti.h
new file mode 100644
index 0000000..a1885db
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-alti.h
@@ -0,0 +1,186 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE
+vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d) {
+
+ const vector unsigned int sl1 = ALTI_SL1;
+ const vector unsigned int sr1 = ALTI_SR1;
+#ifdef ONLY64
+ const vector unsigned int mask = ALTI_MSK64;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+ const vector unsigned int mask = ALTI_MSK;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+ vector unsigned int v, w, x, y, z;
+ x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+ v = a;
+ y = vec_sr(b, sr1);
+ z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+ w = vec_sl(d, sl1);
+ z = vec_xor(z, w);
+ y = vec_and(y, mask);
+ v = vec_xor(v, x);
+ z = vec_xor(z, y);
+ z = vec_xor(z, v);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array an 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j].s = array[j + size - N].s;
+ }
+ for (; i < size; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ ctx->sfmt[j++].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define ALTI_SWAP (vector unsigned char) \
+ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps high and low 32-bit of 64-bit integers in user
+ * specified array.
+ *
+ * @param array an 128-bit array to be swapped.
+ * @param size size of 128-bit array.
+ */
+static inline void swap(w128_t *array, int size) {
+ int i;
+ const vector unsigned char perm = ALTI_SWAP;
+
+ for (i = 0; i < size; i++) {
+ array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+ }
+}
+#endif
+
+#endif
diff --git a/deps/jemalloc/test/include/test/SFMT-params.h b/deps/jemalloc/test/include/test/SFMT-params.h
new file mode 100644
index 0000000..ade6622
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params.h
@@ -0,0 +1,132 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS_H
+#define SFMT_PARAMS_H
+
+#if !defined(MEXP)
+#ifdef __GNUC__
+ #warning "MEXP is not defined. I assume MEXP is 19937."
+#endif
+ #define MEXP 19937
+#endif
+/*-----------------
+ BASIC DEFINITIONS
+ -----------------*/
+/** Mersenne Exponent. The period of the sequence
+ * is a multiple of 2^MEXP-1.
+ * #define MEXP 19937 */
+/** SFMT generator has an internal state array of 128-bit integers,
+ * and N is its size. */
+#define N (MEXP / 128 + 1)
+/** N32 is the size of internal state array when regarded as an array
+ * of 32-bit integers.*/
+#define N32 (N * 4)
+/** N64 is the size of internal state array when regarded as an array
+ * of 64-bit integers.*/
+#define N64 (N * 2)
+
+/*----------------------
+ the parameters of SFMT
+ following definitions are in paramsXXXX.h file.
+ ----------------------*/
+/** the pick up position of the array.
+#define POS1 122
+*/
+
+/** the parameter of shift left as four 32-bit registers.
+#define SL1 18
+ */
+
+/** the parameter of shift left as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SL2 1
+*/
+
+/** the parameter of shift right as four 32-bit registers.
+#define SR1 11
+*/
+
+/** the parameter of shift right as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SR2 1
+*/
+
+/** A bitmask, used in the recursion. These parameters are introduced
+ * to break symmetry of SIMD.
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+*/
+
+/** These definitions are part of a 128-bit period certification vector.
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xc98e126aU
+*/
+
+#if MEXP == 607
+ #include "test/SFMT-params607.h"
+#elif MEXP == 1279
+ #include "test/SFMT-params1279.h"
+#elif MEXP == 2281
+ #include "test/SFMT-params2281.h"
+#elif MEXP == 4253
+ #include "test/SFMT-params4253.h"
+#elif MEXP == 11213
+ #include "test/SFMT-params11213.h"
+#elif MEXP == 19937
+ #include "test/SFMT-params19937.h"
+#elif MEXP == 44497
+ #include "test/SFMT-params44497.h"
+#elif MEXP == 86243
+ #include "test/SFMT-params86243.h"
+#elif MEXP == 132049
+ #include "test/SFMT-params132049.h"
+#elif MEXP == 216091
+ #include "test/SFMT-params216091.h"
+#else
+#ifdef __GNUC__
+ #error "MEXP is not valid."
+ #undef MEXP
+#else
+ #undef MEXP
+#endif
+
+#endif
+
+#endif /* SFMT_PARAMS_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params11213.h b/deps/jemalloc/test/include/test/SFMT-params11213.h
new file mode 100644
index 0000000..2994bd2
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params11213.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS11213_H
+#define SFMT_PARAMS11213_H
+
+#define POS1 68
+#define SL1 14
+#define SL2 3
+#define SR1 7
+#define SR2 3
+#define MSK1 0xeffff7fbU
+#define MSK2 0xffffffefU
+#define MSK3 0xdfdfbfffU
+#define MSK4 0x7fffdbfdU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xe8148000U
+#define PARITY4 0xd0c7afa3U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
+
+#endif /* SFMT_PARAMS11213_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params1279.h b/deps/jemalloc/test/include/test/SFMT-params1279.h
new file mode 100644
index 0000000..d7959f9
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params1279.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS1279_H
+#define SFMT_PARAMS1279_H
+
+#define POS1 7
+#define SL1 14
+#define SL2 3
+#define SR1 5
+#define SR2 1
+#define MSK1 0xf7fefffdU
+#define MSK2 0x7fefcfffU
+#define MSK3 0xaff3ef3fU
+#define MSK4 0xb5ffff7fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x20000000U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
+
+#endif /* SFMT_PARAMS1279_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params132049.h b/deps/jemalloc/test/include/test/SFMT-params132049.h
new file mode 100644
index 0000000..a1dcec3
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params132049.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS132049_H
+#define SFMT_PARAMS132049_H
+
+#define POS1 110
+#define SL1 19
+#define SL2 1
+#define SR1 21
+#define SR2 1
+#define MSK1 0xffffbb5fU
+#define MSK2 0xfb6ebf95U
+#define MSK3 0xfffefffaU
+#define MSK4 0xcff77fffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xcb520000U
+#define PARITY4 0xc7e91c7dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
+
+#endif /* SFMT_PARAMS132049_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params19937.h b/deps/jemalloc/test/include/test/SFMT-params19937.h
new file mode 100644
index 0000000..fb92b4c
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params19937.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS19937_H
+#define SFMT_PARAMS19937_H
+
+#define POS1 122
+#define SL1 18
+#define SL2 1
+#define SR1 11
+#define SR2 1
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x13c9e684U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
+
+#endif /* SFMT_PARAMS19937_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params216091.h b/deps/jemalloc/test/include/test/SFMT-params216091.h
new file mode 100644
index 0000000..125ce28
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params216091.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS216091_H
+#define SFMT_PARAMS216091_H
+
+#define POS1 627
+#define SL1 11
+#define SL2 3
+#define SR1 10
+#define SR2 1
+#define MSK1 0xbff7bff7U
+#define MSK2 0xbfffffffU
+#define MSK3 0xbffffa7fU
+#define MSK4 0xffddfbfbU
+#define PARITY1 0xf8000001U
+#define PARITY2 0x89e80709U
+#define PARITY3 0x3bd2b64bU
+#define PARITY4 0x0c64b1e4U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
+
+#endif /* SFMT_PARAMS216091_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params2281.h b/deps/jemalloc/test/include/test/SFMT-params2281.h
new file mode 100644
index 0000000..0ef85c4
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params2281.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS2281_H
+#define SFMT_PARAMS2281_H
+
+#define POS1 12
+#define SL1 19
+#define SL2 1
+#define SR1 5
+#define SR2 1
+#define MSK1 0xbff7ffbfU
+#define MSK2 0xfdfffffeU
+#define MSK3 0xf7ffef7fU
+#define MSK4 0xf2f7cbbfU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x41dfa600U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
+
+#endif /* SFMT_PARAMS2281_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params4253.h b/deps/jemalloc/test/include/test/SFMT-params4253.h
new file mode 100644
index 0000000..9f07bc6
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params4253.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS4253_H
+#define SFMT_PARAMS4253_H
+
+#define POS1 17
+#define SL1 20
+#define SL2 1
+#define SR1 7
+#define SR2 1
+#define MSK1 0x9f7bffffU
+#define MSK2 0x9fffff5fU
+#define MSK3 0x3efffffbU
+#define MSK4 0xfffff7bbU
+#define PARITY1 0xa8000001U
+#define PARITY2 0xaf5390a3U
+#define PARITY3 0xb740b3f8U
+#define PARITY4 0x6c11486dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
+
+#endif /* SFMT_PARAMS4253_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params44497.h b/deps/jemalloc/test/include/test/SFMT-params44497.h
new file mode 100644
index 0000000..85598fe
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params44497.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS44497_H
+#define SFMT_PARAMS44497_H
+
+#define POS1 330
+#define SL1 5
+#define SL2 3
+#define SR1 9
+#define SR2 3
+#define MSK1 0xeffffffbU
+#define MSK2 0xdfbebfffU
+#define MSK3 0xbfbf7befU
+#define MSK4 0x9ffd7bffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xa3ac4000U
+#define PARITY4 0xecc1327aU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
+
+#endif /* SFMT_PARAMS44497_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params607.h b/deps/jemalloc/test/include/test/SFMT-params607.h
new file mode 100644
index 0000000..bc76485
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params607.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS607_H
+#define SFMT_PARAMS607_H
+
+#define POS1 2
+#define SL1 15
+#define SL2 3
+#define SR1 13
+#define SR2 3
+#define MSK1 0xfdff37ffU
+#define MSK2 0xef7f3f7dU
+#define MSK3 0xff777b7dU
+#define MSK4 0x7ff7fb2fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x5986f054U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
+
+#endif /* SFMT_PARAMS607_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params86243.h b/deps/jemalloc/test/include/test/SFMT-params86243.h
new file mode 100644
index 0000000..5e4d783
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params86243.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS86243_H
+#define SFMT_PARAMS86243_H
+
+#define POS1 366
+#define SL1 6
+#define SL2 7
+#define SR1 19
+#define SR2 1
+#define MSK1 0xfdbffbffU
+#define MSK2 0xbff7ff3fU
+#define MSK3 0xfd77efffU
+#define MSK4 0xbf9ff3ffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xe9528d85U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
+ #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
+
+#endif /* SFMT_PARAMS86243_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-sse2.h b/deps/jemalloc/test/include/test/SFMT-sse2.h
new file mode 100644
index 0000000..169ad55
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-sse2.h
@@ -0,0 +1,157 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-sse2.h
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * @note We assume LITTLE ENDIAN in this file
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+
+#ifndef SFMT_SSE2_H
+#define SFMT_SSE2_H
+
+/**
+ * This function represents the recursion formula.
+ * @param a a 128-bit part of the interal state array
+ * @param b a 128-bit part of the interal state array
+ * @param c a 128-bit part of the interal state array
+ * @param d a 128-bit part of the interal state array
+ * @param mask 128-bit mask
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
+ __m128i c, __m128i d, __m128i mask) {
+ __m128i v, x, y, z;
+
+ x = _mm_load_si128(a);
+ y = _mm_srli_epi32(*b, SR1);
+ z = _mm_srli_si128(c, SR2);
+ v = _mm_slli_epi32(d, SL1);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, v);
+ x = _mm_slli_si128(x, SL2);
+ y = _mm_and_si128(y, mask);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, y);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array an 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pesudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ r = _mm_load_si128(&array[j + size - N].si);
+ _mm_store_si128(&ctx->sfmt[j].si, r);
+ }
+ for (; i < size; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ _mm_store_si128(&ctx->sfmt[j++].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#endif
diff --git a/deps/jemalloc/test/include/test/SFMT.h b/deps/jemalloc/test/include/test/SFMT.h
new file mode 100644
index 0000000..863fc55
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT.h
@@ -0,0 +1,146 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
+ * number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ *
+ * @note We assume that your system has inttypes.h. If your system
+ * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
+ * and you have to define PRIu64 and PRIx64 in this file as follows:
+ * @verbatim
+ typedef unsigned int uint32_t
+ typedef unsigned long long uint64_t
+ #define PRIu64 "llu"
+ #define PRIx64 "llx"
+@endverbatim
+ * uint32_t must be exactly 32-bit unsigned integer type (no more, no
+ * less), and uint64_t must be exactly 64-bit unsigned integer type.
+ * PRIu64 and PRIx64 are used for printf function to print 64-bit
+ * unsigned int and 64-bit unsigned int in hexadecimal format.
+ */
+
+#ifndef SFMT_H
+#define SFMT_H
+
+typedef struct sfmt_s sfmt_t;
+
+uint32_t gen_rand32(sfmt_t *ctx);
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
+uint64_t gen_rand64(sfmt_t *ctx);
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
+sfmt_t *init_gen_rand(uint32_t seed);
+sfmt_t *init_by_array(uint32_t *init_key, int key_length);
+void fini_gen_rand(sfmt_t *ctx);
+const char *get_idstring(void);
+int get_min_array_size32(void);
+int get_min_array_size64(void);
+
+/* These real versions are due to Isaku Wada */
+/** generates a random number on [0,1]-real-interval */
+static inline double to_real1(uint32_t v) {
+ return v * (1.0/4294967295.0);
+ /* divided by 2^32-1 */
+}
+
+/** generates a random number on [0,1]-real-interval */
+static inline double genrand_real1(sfmt_t *ctx) {
+ return to_real1(gen_rand32(ctx));
+}
+
+/** generates a random number on [0,1)-real-interval */
+static inline double to_real2(uint32_t v) {
+ return v * (1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on [0,1)-real-interval */
+static inline double genrand_real2(sfmt_t *ctx) {
+ return to_real2(gen_rand32(ctx));
+}
+
+/** generates a random number on (0,1)-real-interval */
+static inline double to_real3(uint32_t v) {
+ return (((double)v) + 0.5)*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on (0,1)-real-interval */
+static inline double genrand_real3(sfmt_t *ctx) {
+ return to_real3(gen_rand32(ctx));
+}
+/** These real versions are due to Isaku Wada */
+
+/** generates a random number on [0,1) with 53-bit resolution*/
+static inline double to_res53(uint64_t v) {
+ return v * (1.0/18446744073709551616.0L);
+}
+
+/** generates a random number on [0,1) with 53-bit resolution from two
+ * 32 bit integers */
+static inline double to_res53_mix(uint32_t x, uint32_t y) {
+ return to_res53(x | ((uint64_t)y << 32));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ */
+static inline double genrand_res53(sfmt_t *ctx) {
+ return to_res53(gen_rand64(ctx));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ using 32bit integer.
+ */
+static inline double genrand_res53_mix(sfmt_t *ctx) {
+ uint32_t x, y;
+
+ x = gen_rand32(ctx);
+ y = gen_rand32(ctx);
+ return to_res53_mix(x, y);
+}
+#endif
diff --git a/deps/jemalloc/test/include/test/btalloc.h b/deps/jemalloc/test/include/test/btalloc.h
new file mode 100644
index 0000000..5877ea7
--- /dev/null
+++ b/deps/jemalloc/test/include/test/btalloc.h
@@ -0,0 +1,30 @@
+/* btalloc() provides a mechanism for allocating via permuted backtraces. */
+void *btalloc(size_t size, unsigned bits);
+
+#define btalloc_n_proto(n) \
+void *btalloc_##n(size_t size, unsigned bits);
+btalloc_n_proto(0)
+btalloc_n_proto(1)
+
+#define btalloc_n_gen(n) \
+void * \
+btalloc_##n(size_t size, unsigned bits) { \
+ void *p; \
+ \
+ if (bits == 0) { \
+ p = mallocx(size, 0); \
+ } else { \
+ switch (bits & 0x1U) { \
+ case 0: \
+ p = (btalloc_0(size, bits >> 1)); \
+ break; \
+ case 1: \
+ p = (btalloc_1(size, bits >> 1)); \
+ break; \
+ default: not_reached(); \
+ } \
+ } \
+ /* Intentionally sabotage tail call optimization. */ \
+ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
+ return p; \
+}
diff --git a/deps/jemalloc/test/include/test/extent_hooks.h b/deps/jemalloc/test/include/test/extent_hooks.h
new file mode 100644
index 0000000..1f06201
--- /dev/null
+++ b/deps/jemalloc/test/include/test/extent_hooks.h
@@ -0,0 +1,289 @@
+/*
+ * Boilerplate code used for testing extent hooks via interception and
+ * passthrough.
+ */
+
+static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit,
+ unsigned arena_ind);
+static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t size_a, size_t size_b, bool committed,
+ unsigned arena_ind);
+static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+
+static extent_hooks_t *default_hooks;
+static extent_hooks_t hooks = {
+ extent_alloc_hook,
+ extent_dalloc_hook,
+ extent_destroy_hook,
+ extent_commit_hook,
+ extent_decommit_hook,
+ extent_purge_lazy_hook,
+ extent_purge_forced_hook,
+ extent_split_hook,
+ extent_merge_hook
+};
+
+/* Control whether hook functions pass calls through to default hooks. */
+static bool try_alloc = true;
+static bool try_dalloc = true;
+static bool try_destroy = true;
+static bool try_commit = true;
+static bool try_decommit = true;
+static bool try_purge_lazy = true;
+static bool try_purge_forced = true;
+static bool try_split = true;
+static bool try_merge = true;
+
+/* Set to false prior to operations, then introspect after operations. */
+static bool called_alloc;
+static bool called_dalloc;
+static bool called_destroy;
+static bool called_commit;
+static bool called_decommit;
+static bool called_purge_lazy;
+static bool called_purge_forced;
+static bool called_split;
+static bool called_merge;
+
+/* Set to false prior to operations, then introspect after operations. */
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_destroy;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge_lazy;
+static bool did_purge_forced;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+# define TRACE_HOOK(fmt, ...)
+#endif
+
+static void *
+extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ void *ret;
+
+ TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, "
+ "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ new_addr, size, alignment, *zero ? "true" : "false", *commit ?
+ "true" : "false", arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
+ "Wrong hook function");
+ called_alloc = true;
+ if (!try_alloc) {
+ return NULL;
+ }
+ ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
+ zero, commit, 0);
+ did_alloc = (ret != NULL);
+ return ret;
+}
+
+static bool
+extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+ "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+ "true" : "false", arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
+ "Wrong hook function");
+ called_dalloc = true;
+ if (!try_dalloc) {
+ return true;
+ }
+ err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
+ did_dalloc = !err;
+ return err;
+}
+
+static void
+extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+ "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+ "true" : "false", arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook,
+ "Wrong hook function");
+ called_destroy = true;
+ if (!try_destroy) {
+ return;
+ }
+ default_hooks->destroy(default_hooks, addr, size, committed, 0);
+ did_destroy = true;
+}
+
+static bool
+extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->commit, extent_commit_hook,
+ "Wrong hook function");
+ called_commit = true;
+ if (!try_commit) {
+ return true;
+ }
+ err = default_hooks->commit(default_hooks, addr, size, offset, length,
+ 0);
+ did_commit = !err;
+ return err;
+}
+
+static bool
+extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
+ "Wrong hook function");
+ called_decommit = true;
+ if (!try_decommit) {
+ return true;
+ }
+ err = default_hooks->decommit(default_hooks, addr, size, offset, length,
+ 0);
+ did_decommit = !err;
+ return err;
+}
+
+static bool
+extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
+ "Wrong hook function");
+ called_purge_lazy = true;
+ if (!try_purge_lazy) {
+ return true;
+ }
+ err = default_hooks->purge_lazy == NULL ||
+ default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
+ 0);
+ did_purge_lazy = !err;
+ return err;
+}
+
+static bool
+extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
+ "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
+ offset, length, arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
+ "Wrong hook function");
+ called_purge_forced = true;
+ if (!try_purge_forced) {
+ return true;
+ }
+ err = default_hooks->purge_forced == NULL ||
+ default_hooks->purge_forced(default_hooks, addr, size, offset,
+ length, 0);
+ did_purge_forced = !err;
+ return err;
+}
+
+static bool
+extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, "
+ "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ addr, size, size_a, size_b, committed ? "true" : "false",
+ arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->split, extent_split_hook,
+ "Wrong hook function");
+ called_split = true;
+ if (!try_split) {
+ return true;
+ }
+ err = (default_hooks->split == NULL ||
+ default_hooks->split(default_hooks, addr, size, size_a, size_b,
+ committed, 0));
+ did_split = !err;
+ return err;
+}
+
+static bool
+extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ bool err;
+
+ TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p "
+ "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
+ addr_a, size_a, addr_b, size_b, committed ? "true" : "false",
+ arena_ind);
+ assert_ptr_eq(extent_hooks, &hooks,
+ "extent_hooks should be same as pointer used to set hooks");
+ assert_ptr_eq(extent_hooks->merge, extent_merge_hook,
+ "Wrong hook function");
+ assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
+ "Extents not mergeable");
+ called_merge = true;
+ if (!try_merge) {
+ return true;
+ }
+ err = (default_hooks->merge == NULL ||
+ default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
+ committed, 0));
+ did_merge = !err;
+ return err;
+}
+
+static void
+extent_hooks_prep(void) {
+ size_t sz;
+
+ sz = sizeof(default_hooks);
+ assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
+ NULL, 0), 0, "Unexpected mallctl() error");
+}
diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in
new file mode 100644
index 0000000..c46af5d
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in
@@ -0,0 +1,173 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+#include <string.h>
+#ifdef _WIN32
+# include "msvc_compat/strings.h"
+#endif
+
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+#else
+# include <pthread.h>
+#endif
+
+#include "test/jemalloc_test_defs.h"
+
+#if defined(JEMALLOC_OSATOMIC)
+# include <libkern/OSAtomic.h>
+#endif
+
+#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
+# include <altivec.h>
+#endif
+#ifdef HAVE_SSE2
+# include <emmintrin.h>
+#endif
+
+/******************************************************************************/
+/*
+ * For unit tests, expose all public and private interfaces.
+ */
+#ifdef JEMALLOC_UNIT_TEST
+# define JEMALLOC_JET
+# define JEMALLOC_MANGLE
+# include "jemalloc/internal/jemalloc_preamble.h"
+# include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/******************************************************************************/
+/*
+ * For integration tests, expose the public jemalloc interfaces, but only
+ * expose the minimum necessary internal utility code (to avoid re-implementing
+ * essentially identical code within the test infrastructure).
+ */
+#elif defined(JEMALLOC_INTEGRATION_TEST) || \
+ defined(JEMALLOC_INTEGRATION_CPP_TEST)
+# define JEMALLOC_MANGLE
+# include "jemalloc/jemalloc@install_suffix@.h"
+# include "jemalloc/internal/jemalloc_internal_defs.h"
+# include "jemalloc/internal/jemalloc_internal_macros.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "jemalloc/internal/private_namespace.h"
+# include "jemalloc/internal/test_hooks.h"
+
+/* Hermetic headers. */
+# include "jemalloc/internal/assert.h"
+# include "jemalloc/internal/malloc_io.h"
+# include "jemalloc/internal/nstime.h"
+# include "jemalloc/internal/util.h"
+
+/* Non-hermetic headers. */
+# include "jemalloc/internal/qr.h"
+# include "jemalloc/internal/ql.h"
+
+/******************************************************************************/
+/*
+ * For stress tests, expose the public jemalloc interfaces with name mangling
+ * so that they can be tested as e.g. malloc() and free(). Also expose the
+ * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
+ * a separate allocator for their internal data structures.
+ */
+#elif defined(JEMALLOC_STRESS_TEST)
+# include "jemalloc/jemalloc@install_suffix@.h"
+
+# include "jemalloc/jemalloc_protos_jet.h"
+
+# define JEMALLOC_JET
+# include "jemalloc/internal/jemalloc_preamble.h"
+# include "jemalloc/internal/jemalloc_internal_includes.h"
+# include "jemalloc/internal/public_unnamespace.h"
+# undef JEMALLOC_JET
+
+# include "jemalloc/jemalloc_rename.h"
+# define JEMALLOC_MANGLE
+# ifdef JEMALLOC_STRESS_TESTLIB
+# include "jemalloc/jemalloc_mangle_jet.h"
+# else
+# include "jemalloc/jemalloc_mangle.h"
+# endif
+
+/******************************************************************************/
+/*
+ * This header does dangerous things, the effects of which only test code
+ * should be subject to.
+ */
+#else
+# error "This header cannot be included outside a testing context"
+#endif
+
+/******************************************************************************/
+/*
+ * Common test utilities.
+ */
+#include "test/btalloc.h"
+#include "test/math.h"
+#include "test/mtx.h"
+#include "test/mq.h"
+#include "test/test.h"
+#include "test/timer.h"
+#include "test/thd.h"
+#define MEXP 19937
+#include "test/SFMT.h"
+
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#undef assert_not_implemented
+
+#define assert(e) do { \
+ if (!(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define not_implemented() do { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define assert_not_implemented(e) do { \
+ if (!(e)) { \
+ not_implemented(); \
+ } \
+} while (0)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
new file mode 100644
index 0000000..5cc8532
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
@@ -0,0 +1,9 @@
+#include "jemalloc/internal/jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+/*
+ * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its
+ * dependencies are notoriously unportable in practice.
+ */
+#undef HAVE_SSE2
+#undef HAVE_ALTIVEC
diff --git a/deps/jemalloc/test/include/test/math.h b/deps/jemalloc/test/include/test/math.h
new file mode 100644
index 0000000..efba086
--- /dev/null
+++ b/deps/jemalloc/test/include/test/math.h
@@ -0,0 +1,306 @@
+/*
+ * Compute the natural log of Gamma(x), accurate to 10 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
+ * [S14]. Communications of the ACM 9(9):684.
+ */
+static inline double
+ln_gamma(double x) {
+ double f, z;
+
+ assert(x > 0.0);
+
+ if (x < 7.0) {
+ f = 1.0;
+ z = x;
+ while (z < 7.0) {
+ f *= z;
+ z += 1.0;
+ }
+ x = z;
+ f = -log(f);
+ } else {
+ f = 0.0;
+ }
+
+ z = 1.0 / (x * x);
+
+ return f + (x-0.5) * log(x) - x + 0.918938533204673 +
+ (((-0.000595238095238 * z + 0.000793650793651) * z -
+ 0.002777777777778) * z + 0.083333333333333) / x;
+}
+
+/*
+ * Compute the incomplete Gamma ratio for [0..x], where p is the shape
+ * parameter, and ln_gamma_p is ln_gamma(p).
+ *
+ * This implementation is based on:
+ *
+ * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
+ * Applied Statistics 19:285-287.
+ */
+static inline double
+i_gamma(double x, double p, double ln_gamma_p) {
+ double acu, factor, oflo, gin, term, rn, a, b, an, dif;
+ double pn[6];
+ unsigned i;
+
+ assert(p > 0.0);
+ assert(x >= 0.0);
+
+ if (x == 0.0) {
+ return 0.0;
+ }
+
+ acu = 1.0e-10;
+ oflo = 1.0e30;
+ gin = 0.0;
+ factor = exp(p * log(x) - x - ln_gamma_p);
+
+ if (x <= 1.0 || x < p) {
+ /* Calculation by series expansion. */
+ gin = 1.0;
+ term = 1.0;
+ rn = p;
+
+ while (true) {
+ rn += 1.0;
+ term *= x / rn;
+ gin += term;
+ if (term <= acu) {
+ gin *= factor / p;
+ return gin;
+ }
+ }
+ } else {
+ /* Calculation by continued fraction. */
+ a = 1.0 - p;
+ b = a + x + 1.0;
+ term = 0.0;
+ pn[0] = 1.0;
+ pn[1] = x;
+ pn[2] = x + 1.0;
+ pn[3] = x * b;
+ gin = pn[2] / pn[3];
+
+ while (true) {
+ a += 1.0;
+ b += 2.0;
+ term += 1.0;
+ an = a * term;
+ for (i = 0; i < 2; i++) {
+ pn[i+4] = b * pn[i+2] - an * pn[i];
+ }
+ if (pn[5] != 0.0) {
+ rn = pn[4] / pn[5];
+ dif = fabs(gin - rn);
+ if (dif <= acu && dif <= acu * rn) {
+ gin = 1.0 - factor * gin;
+ return gin;
+ }
+ gin = rn;
+ }
+ for (i = 0; i < 4; i++) {
+ pn[i] = pn[i+2];
+ }
+
+ if (fabs(pn[4]) >= oflo) {
+ for (i = 0; i < 4; i++) {
+ pn[i] /= oflo;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the normal distribution,
+ * compute the limit on the definite integral from [-inf..z] that satisfies p,
+ * accurate to 16 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
+ * distribution. Applied Statistics 37(3):477-484.
+ */
+static inline double
+pt_norm(double p) {
+ double q, r, ret;
+
+ assert(p > 0.0 && p < 1.0);
+
+ q = p - 0.5;
+ if (fabs(q) <= 0.425) {
+ /* p close to 1/2. */
+ r = 0.180625 - q * q;
+ return q * (((((((2.5090809287301226727e3 * r +
+ 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
+ r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
+ * r + 3.3871328727963666080e0) /
+ (((((((5.2264952788528545610e3 * r +
+ 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
+ r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
+ * r + 1.0);
+ } else {
+ if (q < 0.0) {
+ r = p;
+ } else {
+ r = 1.0 - p;
+ }
+ assert(r > 0.0);
+
+ r = sqrt(-log(r));
+ if (r <= 5.0) {
+ /* p neither close to 1/2 nor 0 or 1. */
+ r -= 1.6;
+ ret = ((((((((7.74545014278341407640e-4 * r +
+ 2.27238449892691845833e-2) * r +
+ 2.41780725177450611770e-1) * r +
+ 1.27045825245236838258e0) * r +
+ 3.64784832476320460504e0) * r +
+ 5.76949722146069140550e0) * r +
+ 4.63033784615654529590e0) * r +
+ 1.42343711074968357734e0) /
+ (((((((1.05075007164441684324e-9 * r +
+ 5.47593808499534494600e-4) * r +
+ 1.51986665636164571966e-2)
+ * r + 1.48103976427480074590e-1) * r +
+ 6.89767334985100004550e-1) * r +
+ 1.67638483018380384940e0) * r +
+ 2.05319162663775882187e0) * r + 1.0));
+ } else {
+ /* p near 0 or 1. */
+ r -= 5.0;
+ ret = ((((((((2.01033439929228813265e-7 * r +
+ 2.71155556874348757815e-5) * r +
+ 1.24266094738807843860e-3) * r +
+ 2.65321895265761230930e-2) * r +
+ 2.96560571828504891230e-1) * r +
+ 1.78482653991729133580e0) * r +
+ 5.46378491116411436990e0) * r +
+ 6.65790464350110377720e0) /
+ (((((((2.04426310338993978564e-15 * r +
+ 1.42151175831644588870e-7) * r +
+ 1.84631831751005468180e-5) * r +
+ 7.86869131145613259100e-4) * r +
+ 1.48753612908506148525e-2) * r +
+ 1.36929880922735805310e-1) * r +
+ 5.99832206555887937690e-1)
+ * r + 1.0));
+ }
+ if (q < 0.0) {
+ ret = -ret;
+ }
+ return ret;
+ }
+}
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
+ * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
+ * the upper limit on the definite integral from [0..z] that satisfies p,
+ * accurate to 12 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
+ * the Chi^2 distribution. Applied Statistics 24(3):385-388.
+ *
+ * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
+ * points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
+ */
+static inline double
+pt_chi2(double p, double df, double ln_gamma_df_2) {
+ double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
+ unsigned i;
+
+ assert(p >= 0.0 && p < 1.0);
+ assert(df > 0.0);
+
+ e = 5.0e-7;
+ aa = 0.6931471805;
+
+ xx = 0.5 * df;
+ c = xx - 1.0;
+
+ if (df < -1.24 * log(p)) {
+ /* Starting approximation for small Chi^2. */
+ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
+ if (ch - e < 0.0) {
+ return ch;
+ }
+ } else {
+ if (df > 0.32) {
+ x = pt_norm(p);
+ /*
+ * Starting approximation using Wilson and Hilferty
+ * estimate.
+ */
+ p1 = 0.222222 / df;
+ ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
+ /* Starting approximation for p tending to 1. */
+ if (ch > 2.2 * df + 6.0) {
+ ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
+ ln_gamma_df_2);
+ }
+ } else {
+ ch = 0.4;
+ a = log(1.0 - p);
+ while (true) {
+ q = ch;
+ p1 = 1.0 + ch * (4.67 + ch);
+ p2 = ch * (6.73 + ch * (6.66 + ch));
+ t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
+ * (13.32 + 3.0 * ch)) / p2;
+ ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
+ c * aa) * p2 / p1) / t;
+ if (fabs(q / ch - 1.0) - 0.01 <= 0.0) {
+ break;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ /* Calculation of seven-term Taylor series. */
+ q = ch;
+ p1 = 0.5 * ch;
+ if (p1 < 0.0) {
+ return -1.0;
+ }
+ p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
+ t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
+ b = t / ch;
+ a = 0.5 * t - b * c;
+ s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
+ 60.0 * a))))) / 420.0;
+ s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
+ a)))) / 2520.0;
+ s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
+ s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
+ (889.0 + 1740.0 * a))) / 5040.0;
+ s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
+ s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
+ ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
+ - b * (s4 - b * (s5 - b * s6))))));
+ if (fabs(q / ch - 1.0) <= e) {
+ break;
+ }
+ }
+
+ return ch;
+}
+
+/*
+ * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
+ * compute the upper limit on the definite integral from [0..z] that satisfies
+ * p.
+ */
+static inline double
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
+ return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale;
+}
diff --git a/deps/jemalloc/test/include/test/mq.h b/deps/jemalloc/test/include/test/mq.h
new file mode 100644
index 0000000..af2c078
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mq.h
@@ -0,0 +1,107 @@
+void mq_nanosleep(unsigned ns);
+
+/*
+ * Simple templated message queue implementation that relies on only mutexes for
+ * synchronization (which reduces portability issues). Given the following
+ * setup:
+ *
+ * typedef struct mq_msg_s mq_msg_t;
+ * struct mq_msg_s {
+ * mq_msg(mq_msg_t) link;
+ * [message data]
+ * };
+ * mq_gen(, mq_, mq_t, mq_msg_t, link)
+ *
+ * The API is as follows:
+ *
+ * bool mq_init(mq_t *mq);
+ * void mq_fini(mq_t *mq);
+ * unsigned mq_count(mq_t *mq);
+ * mq_msg_t *mq_tryget(mq_t *mq);
+ * mq_msg_t *mq_get(mq_t *mq);
+ * void mq_put(mq_t *mq, mq_msg_t *msg);
+ *
+ * The message queue linkage embedded in each message is to be treated as
+ * externally opaque (no need to initialize or clean up externally). mq_fini()
+ * does not perform any cleanup of messages, since it knows nothing of their
+ * payloads.
+ */
+#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
+
+#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
+typedef struct { \
+ mtx_t lock; \
+ ql_head(a_mq_msg_type) msgs; \
+ unsigned count; \
+} a_mq_type; \
+a_attr bool \
+a_prefix##init(a_mq_type *mq) { \
+ \
+ if (mtx_init(&mq->lock)) { \
+ return true; \
+ } \
+ ql_new(&mq->msgs); \
+ mq->count = 0; \
+ return false; \
+} \
+a_attr void \
+a_prefix##fini(a_mq_type *mq) { \
+ mtx_fini(&mq->lock); \
+} \
+a_attr unsigned \
+a_prefix##count(a_mq_type *mq) { \
+ unsigned count; \
+ \
+ mtx_lock(&mq->lock); \
+ count = mq->count; \
+ mtx_unlock(&mq->lock); \
+ return count; \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##tryget(a_mq_type *mq) { \
+ a_mq_msg_type *msg; \
+ \
+ mtx_lock(&mq->lock); \
+ msg = ql_first(&mq->msgs); \
+ if (msg != NULL) { \
+ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
+ mq->count--; \
+ } \
+ mtx_unlock(&mq->lock); \
+ return msg; \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##get(a_mq_type *mq) { \
+ a_mq_msg_type *msg; \
+ unsigned ns; \
+ \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) { \
+ return msg; \
+ } \
+ \
+ ns = 1; \
+ while (true) { \
+ mq_nanosleep(ns); \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) { \
+ return msg; \
+ } \
+ if (ns < 1000*1000*1000) { \
+ /* Double sleep time, up to max 1 second. */ \
+ ns <<= 1; \
+ if (ns > 1000*1000*1000) { \
+ ns = 1000*1000*1000; \
+ } \
+ } \
+ } \
+} \
+a_attr void \
+a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \
+ \
+ mtx_lock(&mq->lock); \
+ ql_elm_new(msg, a_field); \
+ ql_tail_insert(&mq->msgs, msg, a_field); \
+ mq->count++; \
+ mtx_unlock(&mq->lock); \
+}
diff --git a/deps/jemalloc/test/include/test/mtx.h b/deps/jemalloc/test/include/test/mtx.h
new file mode 100644
index 0000000..066a213
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mtx.h
@@ -0,0 +1,21 @@
+/*
+ * mtx is a slightly simplified version of malloc_mutex. This code duplication
+ * is unfortunate, but there are allocator bootstrapping considerations that
+ * would leak into the test infrastructure if malloc_mutex were used directly
+ * in tests.
+ */
+
+typedef struct {
+#ifdef _WIN32
+ CRITICAL_SECTION lock;
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock lock;
+#else
+ pthread_mutex_t lock;
+#endif
+} mtx_t;
+
+bool mtx_init(mtx_t *mtx);
+void mtx_fini(mtx_t *mtx);
+void mtx_lock(mtx_t *mtx);
+void mtx_unlock(mtx_t *mtx);
diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h
new file mode 100644
index 0000000..fd0e526
--- /dev/null
+++ b/deps/jemalloc/test/include/test/test.h
@@ -0,0 +1,338 @@
+#define ASSERT_BUFSIZE 256
+
+#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
+ t a_ = (a); \
+ t b_ = (b); \
+ if (!(a_ cmp b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) " #cmp " (%s) --> " \
+ "%" pri " " #neg_cmp " %" pri ": ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_, b_); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
+ ==, "p", __VA_ARGS__)
+#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
+ ==, "p", __VA_ARGS__)
+
+#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
+#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
+#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
+#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
+#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
+#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
+
+#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
+#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
+#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
+#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
+#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
+#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
+
+#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
+#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
+#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
+#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
+#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
+#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
+
+#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
+#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
+#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
+#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
+#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
+#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
+
+#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
+ !=, "ld", __VA_ARGS__)
+#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
+ ==, "ld", __VA_ARGS__)
+#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
+ >=, "ld", __VA_ARGS__)
+#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
+ >, "ld", __VA_ARGS__)
+#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
+ <, "ld", __VA_ARGS__)
+#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
+ <=, "ld", __VA_ARGS__)
+
+#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
+ a, b, ==, !=, "lu", __VA_ARGS__)
+#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
+ a, b, !=, ==, "lu", __VA_ARGS__)
+#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <, >=, "lu", __VA_ARGS__)
+#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <=, >, "lu", __VA_ARGS__)
+#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >=, <, "lu", __VA_ARGS__)
+#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >, <=, "lu", __VA_ARGS__)
+
+#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
+ !=, "qd", __VA_ARGS__)
+#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
+ ==, "qd", __VA_ARGS__)
+#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
+ >=, "qd", __VA_ARGS__)
+#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
+ >, "qd", __VA_ARGS__)
+#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
+ <, "qd", __VA_ARGS__)
+#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
+ <=, "qd", __VA_ARGS__)
+
+#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", __VA_ARGS__)
+#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", __VA_ARGS__)
+#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <, >=, "qu", __VA_ARGS__)
+#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <=, >, "qu", __VA_ARGS__)
+#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >=, <, "qu", __VA_ARGS__)
+#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >, <=, "qu", __VA_ARGS__)
+
+#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
+ !=, "jd", __VA_ARGS__)
+#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
+ ==, "jd", __VA_ARGS__)
+#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
+ >=, "jd", __VA_ARGS__)
+#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
+ >, "jd", __VA_ARGS__)
+#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
+ <, "jd", __VA_ARGS__)
+#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
+ <=, "jd", __VA_ARGS__)
+
+#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", __VA_ARGS__)
+#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", __VA_ARGS__)
+#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
+ >=, "ju", __VA_ARGS__)
+#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
+ >, "ju", __VA_ARGS__)
+#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
+ <, "ju", __VA_ARGS__)
+#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
+ <=, "ju", __VA_ARGS__)
+
+#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
+ !=, "zd", __VA_ARGS__)
+#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
+ ==, "zd", __VA_ARGS__)
+#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
+ >=, "zd", __VA_ARGS__)
+#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
+ >, "zd", __VA_ARGS__)
+#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
+ <, "zd", __VA_ARGS__)
+#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
+ <=, "zd", __VA_ARGS__)
+
+#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
+ !=, "zu", __VA_ARGS__)
+#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
+ ==, "zu", __VA_ARGS__)
+#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
+ >=, "zu", __VA_ARGS__)
+#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
+ >, "zu", __VA_ARGS__)
+#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
+ <, "zu", __VA_ARGS__)
+#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
+ <=, "zu", __VA_ARGS__)
+
+#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
+ !=, FMTd32, __VA_ARGS__)
+#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
+ ==, FMTd32, __VA_ARGS__)
+#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
+ >=, FMTd32, __VA_ARGS__)
+#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
+ >, FMTd32, __VA_ARGS__)
+#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
+ <, FMTd32, __VA_ARGS__)
+#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
+ <=, FMTd32, __VA_ARGS__)
+
+#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
+ !=, FMTu32, __VA_ARGS__)
+#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
+ ==, FMTu32, __VA_ARGS__)
+#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
+ >=, FMTu32, __VA_ARGS__)
+#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
+ >, FMTu32, __VA_ARGS__)
+#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
+ <, FMTu32, __VA_ARGS__)
+#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
+ <=, FMTu32, __VA_ARGS__)
+
+#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
+ !=, FMTd64, __VA_ARGS__)
+#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
+ ==, FMTd64, __VA_ARGS__)
+#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
+ >=, FMTd64, __VA_ARGS__)
+#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
+ >, FMTd64, __VA_ARGS__)
+#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
+ <, FMTd64, __VA_ARGS__)
+#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
+ <=, FMTd64, __VA_ARGS__)
+
+#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
+ !=, FMTu64, __VA_ARGS__)
+#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
+ ==, FMTu64, __VA_ARGS__)
+#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
+ >=, FMTu64, __VA_ARGS__)
+#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
+ >, FMTu64, __VA_ARGS__)
+#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
+ <, FMTu64, __VA_ARGS__)
+#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
+ <=, FMTu64, __VA_ARGS__)
+
+#define assert_b_eq(a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ == b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) == (%s) --> %s != %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_b_ne(a, b, ...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ != b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) != (%s) --> %s == %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
+#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
+
+#define assert_str_eq(a, b, ...) do { \
+ if (strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) same as (%s) --> " \
+ "\"%s\" differs from \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_str_ne(a, b, ...) do { \
+ if (!strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) differs from (%s) --> " \
+ "\"%s\" same as \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_not_reached(...) do { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Unreachable code reached: ", \
+ __func__, __FILE__, __LINE__); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
+ p_test_fail(prefix, message); \
+} while (0)
+
+/*
+ * If this enum changes, corresponding changes in test/test.sh.in are also
+ * necessary.
+ */
+typedef enum {
+ test_status_pass = 0,
+ test_status_skip = 1,
+ test_status_fail = 2,
+
+ test_status_count = 3
+} test_status_t;
+
+typedef void (test_t)(void);
+
+#define TEST_BEGIN(f) \
+static void \
+f(void) { \
+ p_test_init(#f);
+
+#define TEST_END \
+ goto label_test_end; \
+label_test_end: \
+ p_test_fini(); \
+}
+
+#define test(...) \
+ p_test(__VA_ARGS__, NULL)
+
+#define test_no_reentrancy(...) \
+ p_test_no_reentrancy(__VA_ARGS__, NULL)
+
+#define test_no_malloc_init(...) \
+ p_test_no_malloc_init(__VA_ARGS__, NULL)
+
+#define test_skip_if(e) do { \
+ if (e) { \
+ test_skip("%s:%s:%d: Test skipped: (%s)", \
+ __func__, __FILE__, __LINE__, #e); \
+ goto label_test_end; \
+ } \
+} while (0)
+
+bool test_is_reentrant();
+
+void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+
+/* For private use by macros. */
+test_status_t p_test(test_t *t, ...);
+test_status_t p_test_no_reentrancy(test_t *t, ...);
+test_status_t p_test_no_malloc_init(test_t *t, ...);
+void p_test_init(const char *name);
+void p_test_fini(void);
+void p_test_fail(const char *prefix, const char *message);
diff --git a/deps/jemalloc/test/include/test/thd.h b/deps/jemalloc/test/include/test/thd.h
new file mode 100644
index 0000000..47a5126
--- /dev/null
+++ b/deps/jemalloc/test/include/test/thd.h
@@ -0,0 +1,9 @@
+/* Abstraction layer for threading in tests. */
+#ifdef _WIN32
+typedef HANDLE thd_t;
+#else
+typedef pthread_t thd_t;
+#endif
+
+void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
+void thd_join(thd_t thd, void **ret);
diff --git a/deps/jemalloc/test/include/test/timer.h b/deps/jemalloc/test/include/test/timer.h
new file mode 100644
index 0000000..ace6191
--- /dev/null
+++ b/deps/jemalloc/test/include/test/timer.h
@@ -0,0 +1,11 @@
+/* Simple timer, for use in benchmark reporting. */
+
+typedef struct {
+ nstime_t t0;
+ nstime_t t1;
+} timedelta_t;
+
+void timer_start(timedelta_t *timer);
+void timer_stop(timedelta_t *timer);
+uint64_t timer_usec(const timedelta_t *timer);
+void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 0000000..222164d
--- /dev/null
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,66 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Error in arenas.create");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return NULL;
+}
+
+TEST_BEGIN(test_MALLOCX_ARENA) {
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_MALLOCX_ARENA);
+}
diff --git a/deps/jemalloc/test/integration/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c
new file mode 100644
index 0000000..4375b17
--- /dev/null
+++ b/deps/jemalloc/test/integration/aligned_alloc.c
@@ -0,0 +1,157 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors) {
+ size_t alignment;
+ void *p;
+
+ alignment = 0;
+ set_errno(0);
+ p = aligned_alloc(alignment, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ set_errno(0);
+ p = aligned_alloc(alignment + 1, 1);
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_oom_errors) {
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ ps[i] = aligned_alloc(alignment, size);
+ if (ps[i] == NULL) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+TEST_BEGIN(test_zero_alloc) {
+ void *res = aligned_alloc(8, 0);
+ assert(res);
+ size_t usable = malloc_usable_size(res);
+ assert(usable > 0);
+ free(res);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size,
+ test_zero_alloc);
+}
diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c
new file mode 100644
index 0000000..1425fd0
--- /dev/null
+++ b/deps/jemalloc/test/integration/allocated.c
@@ -0,0 +1,124 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
+ 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
+ assert_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ assert_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = malloc_usable_size(p);
+ assert_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
+ assert_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ assert_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ assert_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return NULL;
+label_ENOENT:
+ assert_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
diff --git a/deps/jemalloc/test/integration/cpp/basic.cpp b/deps/jemalloc/test/integration/cpp/basic.cpp
new file mode 100644
index 0000000..65890ec
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/basic.cpp
@@ -0,0 +1,25 @@
+#include <memory>
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_basic) {
+ auto foo = new long(4);
+ assert_ptr_not_null(foo, "Unexpected new[] failure");
+ delete foo;
+ // Test nullptr handling.
+ foo = nullptr;
+ delete foo;
+
+ auto bar = new long;
+ assert_ptr_not_null(bar, "Unexpected new failure");
+ delete bar;
+ // Test nullptr handling.
+ bar = nullptr;
+ delete bar;
+}
+TEST_END
+
+int
+main() {
+ return test(
+ test_basic);
+}
diff --git a/deps/jemalloc/test/integration/extent.c b/deps/jemalloc/test/integration/extent.c
new file mode 100644
index 0000000..b5db087
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.c
@@ -0,0 +1,248 @@
+#include "test/jemalloc_test.h"
+
+#include "test/extent_hooks.h"
+
+static bool
+check_background_thread_enabled(void) {
+ bool enabled;
+ size_t sz = sizeof(bool);
+ int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);
+ if (ret == ENOENT) {
+ return false;
+ }
+ assert_d_eq(ret, 0, "Unexpected mallctl error");
+ return enabled;
+}
+
+static void
+test_extent_body(unsigned arena_ind) {
+ void *p;
+ size_t large0, large1, large2, sz;
+ size_t purge_mib[3];
+ size_t purge_miblen;
+ int flags;
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.0.size failure");
+ assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.1.size failure");
+ assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ purge_miblen = sizeof(purge_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ purge_mib[1] = (size_t)arena_ind;
+ called_alloc = false;
+ try_alloc = true;
+ try_dalloc = false;
+ try_decommit = false;
+ p = mallocx(large0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_true(called_alloc, "Expected alloc call");
+ called_dalloc = false;
+ called_decommit = false;
+ did_purge_lazy = false;
+ did_purge_forced = false;
+ called_split = false;
+ xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_a) {
+ assert_true(called_dalloc, "Expected dalloc call");
+ assert_true(called_decommit, "Expected decommit call");
+ assert_true(did_purge_lazy || did_purge_forced,
+ "Expected purge");
+ }
+ assert_true(called_split, "Expected split call");
+ dallocx(p, flags);
+ try_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ try_dalloc = false;
+ try_decommit = true;
+ p = mallocx(large0 * 2, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ called_split = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
+ assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_b) {
+ assert_true(did_split, "Expected split");
+ }
+ xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
+ if (did_split) {
+ assert_b_eq(did_decommit, did_commit,
+ "Expected decommit/commit match");
+ }
+ if (xallocx_success_b && xallocx_success_c) {
+ assert_true(did_merge, "Expected merge");
+ }
+ dallocx(p, flags);
+ try_dalloc = true;
+ try_decommit = false;
+
+ /* Make sure non-large allocation succeeds. */
+ p = mallocx(42, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, flags);
+}
+
+static void
+test_manual_hook_auto_arena(void) {
+ unsigned narenas;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ /* Get number of auto arenas. */
+ assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ if (narenas == 1) {
+ return;
+ }
+
+ /* Install custom extent hooks on arena 1 (might not be initialized). */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = 1;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ static bool auto_arena_created = false;
+ if (old_hooks != &hooks) {
+ assert_b_eq(auto_arena_created, false,
+ "Expected auto arena 1 created only once.");
+ auto_arena_created = true;
+ }
+}
+
+static void
+test_manual_hook_body(void) {
+ unsigned arena_ind;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ /* Install custom extent hooks. */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = (size_t)arena_ind;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->alloc, extent_alloc_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->commit, extent_commit_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->decommit, extent_decommit_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->split, extent_split_hook,
+ "Unexpected extent_hooks error");
+ assert_ptr_ne(old_hooks->merge, extent_merge_hook,
+ "Unexpected extent_hooks error");
+
+ if (!check_background_thread_enabled()) {
+ test_extent_body(arena_ind);
+ }
+
+ /* Restore extent hooks. */
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
+ (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
+ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, NULL, 0), 0, "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->alloc, default_hooks->alloc,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->commit, default_hooks->commit,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->decommit, default_hooks->decommit,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->split, default_hooks->split,
+ "Unexpected extent_hooks error");
+ assert_ptr_eq(old_hooks->merge, default_hooks->merge,
+ "Unexpected extent_hooks error");
+}
+
+TEST_BEGIN(test_extent_manual_hook) {
+ test_manual_hook_auto_arena();
+ test_manual_hook_body();
+
+ /* Test failure paths. */
+ try_split = false;
+ test_manual_hook_body();
+ try_merge = false;
+ test_manual_hook_body();
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ test_manual_hook_body();
+
+ try_split = try_merge = try_purge_lazy = try_purge_forced = true;
+}
+TEST_END
+
+TEST_BEGIN(test_extent_auto_hook) {
+ unsigned arena_ind;
+ size_t new_size, sz;
+ extent_hooks_t *new_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
+
+ test_skip_if(check_background_thread_enabled());
+ test_extent_body(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_extent_manual_hook,
+ test_extent_auto_hook);
+}
diff --git a/deps/jemalloc/test/integration/extent.sh b/deps/jemalloc/test/integration/extent.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# When jemalloc was built with --enable-fill, disable junk filling for this
+# test via MALLOC_CONF.
+if [ "x${enable_fill}" = "x1" ] ; then
+  export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/malloc.c b/deps/jemalloc/test/integration/malloc.c
new file mode 100644
index 0000000..8b33bc8
--- /dev/null
+++ b/deps/jemalloc/test/integration/malloc.c
@@ -0,0 +1,16 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * malloc(0) must return a non-NULL, freeable pointer whose usable size is
+ * non-zero.  Use the test framework's assert_* macros rather than plain
+ * assert(): assert() is compiled out under NDEBUG and bypasses harness
+ * failure reporting, and every other test in this directory uses them.
+ */
+TEST_BEGIN(test_zero_alloc) {
+	void *res = malloc(0);
+	assert_ptr_not_null(res, "Unexpected malloc(0) failure");
+	size_t usable = malloc_usable_size(res);
+	assert_zu_gt(usable, 0,
+	    "Expected non-zero usable size for malloc(0) result");
+	free(res);
+}
+TEST_END
+
+/* Harness entry point. */
+int
+main(void) {
+	return test(
+	    test_zero_alloc);
+}
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
new file mode 100644
index 0000000..645d4db
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -0,0 +1,274 @@
+#include "test/jemalloc_test.h"
+
+/* Read an unsigned value (e.g. a size-class count) via mallctl. */
+static unsigned
+get_nsizes_impl(const char *cmd) {
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return ret;
+}
+
+/* Number of large size classes. */
+static unsigned
+get_nlarge(void) {
+	return get_nsizes_impl("arenas.nlextents");
+}
+
+/* Read entry |ind| of an indexed size-class list via a MIB lookup. */
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	/* Patch the index component of the MIB before reading. */
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+	return ret;
+}
+
+/* Size in bytes of the ind'th large size class. */
+static size_t
+get_large_size(size_t ind) {
+	return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+	/* Purge arena 0, where these tests allocate. */
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+/*
+ * mallocx() must return NULL for requests beyond the largest size class,
+ * beyond PTRDIFF_MAX, at SIZE_T_MAX, and for an over-sized alignment.
+ */
+TEST_BEGIN(test_overflow) {
+	size_t largemax;
+
+	largemax = get_large_size(get_nlarge()-1);
+
+	assert_ptr_null(mallocx(largemax+1, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
+
+	assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+	assert_ptr_null(mallocx(SIZE_T_MAX, 0),
+	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+	assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+	    ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+/*
+ * Thread body: create a new arena and allocate one large object from it,
+ * bypassing the tcache.  The pointer is handed back through arg so the
+ * main thread can free it remotely.
+ */
+static void *
+remote_alloc(void *arg) {
+	unsigned arena;
+	size_t sz = sizeof(unsigned);
+	assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+	size_t large_sz;
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+	    NULL, 0), 0, "Unexpected mallctl failure");
+
+	void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
+	    | MALLOCX_TCACHE_NONE);
+	/* arg points at the caller's result slot. */
+	void **ret = (void **)arg;
+	*ret = ptr;
+
+	return NULL;
+}
+
+/* Free, from the main thread, an allocation made by a worker thread. */
+TEST_BEGIN(test_remote_free) {
+	thd_t thd;
+	void *ret;
+	thd_create(&thd, remote_alloc, (void *)&ret);
+	thd_join(thd, NULL);
+	assert_ptr_not_null(ret, "Unexpected mallocx failure");
+
+	/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+	dallocx(ret, 0);
+	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+/*
+ * Repeated near-address-space-sized allocations must eventually fail, and
+ * pointer-width-specific huge size/alignment combinations must fail outright.
+ */
+TEST_BEGIN(test_oom) {
+	size_t largemax;
+	bool oom;
+	void *ptrs[3];
+	unsigned i;
+
+	/*
+	 * It should be impossible to allocate three objects that each consume
+	 * nearly half the virtual address space.
+	 */
+	largemax = get_large_size(get_nlarge()-1);
+	oom = false;
+	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+		ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0));
+		if (ptrs[i] == NULL) {
+			oom = true;
+		}
+	}
+	assert_true(oom,
+	    "Expected OOM during series of calls to mallocx(size=%zu, 0)",
+	    largemax);
+	/* Release whatever did succeed, then purge the dirty pages. */
+	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+		if (ptrs[i] != NULL) {
+			dallocx(ptrs[i], 0);
+		}
+	}
+	purge();
+
+	/* LG_SIZEOF_PTR == 3 selects the 64-bit overflow cases. */
+#if LG_SIZEOF_PTR == 3
+	assert_ptr_null(mallocx(0x8000000000000000ULL,
+	    MALLOCX_ALIGN(0x8000000000000000ULL)),
+	    "Expected OOM for mallocx()");
+	assert_ptr_null(mallocx(0x8000000000000000ULL,
+	    MALLOCX_ALIGN(0x80000000)),
+	    "Expected OOM for mallocx()");
+#else
+	assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
+	    "Expected OOM for mallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+/*
+ * Step through the size classes (sz = nallocx(sz, 0) + 1 advances to the
+ * next class) and check that nallocx(), mallocx(), and sallocx() agree on
+ * usable sizes, both with and without MALLOCX_ZERO.
+ */
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+	size_t sz;
+
+	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+		size_t nsz, rsz;
+		void *p;
+		nsz = nallocx(sz, 0);
+		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+		p = mallocx(sz, 0);
+		assert_ptr_not_null(p,
+		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
+		rsz = sallocx(p, 0);
+		assert_zu_ge(rsz, sz, "Real size smaller than expected");
+		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+		dallocx(p, 0);
+
+		/* Second allocate/free cycle at the same size. */
+		p = mallocx(sz, 0);
+		assert_ptr_not_null(p,
+		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
+		dallocx(p, 0);
+
+		nsz = nallocx(sz, MALLOCX_ZERO);
+		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+		p = mallocx(sz, MALLOCX_ZERO);
+		assert_ptr_not_null(p,
+		    "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
+		    nsz);
+		rsz = sallocx(p, 0);
+		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+		dallocx(p, 0);
+		purge();
+	}
+#undef MAXSZ
+}
+TEST_END
+
+/*
+ * For every power-of-two alignment up to MAXALIGN and a spread of sizes,
+ * verify that nallocx()/mallocx()/sallocx() agree and that returned
+ * pointers satisfy the requested alignment.
+ */
+TEST_BEGIN(test_alignment_and_size) {
+	const char *percpu_arena;
+	size_t sz = sizeof(percpu_arena);
+
+	/*
+	 * NOTE(review): test_skip() here does not return from the test, so
+	 * execution falls through and the loops still run; also note the
+	 * stray ';' after the closing brace below.  Both match the shape of
+	 * the code as shipped -- confirm against upstream before changing.
+	 */
+	if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+	    strcmp(percpu_arena, "disabled") != 0) {
+		test_skip("test_alignment_and_size skipped: "
+		    "not working with percpu arena.");
+	};
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+	size_t nsz, rsz, alignment, total;
+	unsigned i;
+	void *ps[NITER];
+
+	for (i = 0; i < NITER; i++) {
+		ps[i] = NULL;
+	}
+
+	for (alignment = 8;
+	    alignment <= MAXALIGN;
+	    alignment <<= 1) {
+		total = 0;
+		for (sz = 1;
+		    sz < 3 * alignment && sz < (1U << 31);
+		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+			for (i = 0; i < NITER; i++) {
+				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+				    MALLOCX_ZERO | MALLOCX_ARENA(0));
+				assert_zu_ne(nsz, 0,
+				    "nallocx() error for alignment=%zu, "
+				    "size=%zu (%#zx)", alignment, sz, sz);
+				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+				    MALLOCX_ZERO | MALLOCX_ARENA(0));
+				assert_ptr_not_null(ps[i],
+				    "mallocx() error for alignment=%zu, "
+				    "size=%zu (%#zx)", alignment, sz, sz);
+				rsz = sallocx(ps[i], 0);
+				assert_zu_ge(rsz, sz,
+				    "Real size smaller than expected for "
+				    "alignment=%zu, size=%zu", alignment, sz);
+				assert_zu_eq(nsz, rsz,
+				    "nallocx()/sallocx() size mismatch for "
+				    "alignment=%zu, size=%zu", alignment, sz);
+				/* Low bits must be clear for the alignment. */
+				assert_ptr_null(
+				    (void *)((uintptr_t)ps[i] & (alignment-1)),
+				    "%p inadequately aligned for"
+				    " alignment=%zu, size=%zu", ps[i],
+				    alignment, sz);
+				total += rsz;
+				/* Cap total live memory per size step. */
+				if (total >= (MAXALIGN << 1)) {
+					break;
+				}
+			}
+			for (i = 0; i < NITER; i++) {
+				if (ps[i] != NULL) {
+					dallocx(ps[i], 0);
+					ps[i] = NULL;
+				}
+			}
+		}
+		purge();
+	}
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+/* Harness entry point for the mallocx() integration tests. */
+int
+main(void) {
+	return test(
+	    test_overflow,
+	    test_oom,
+	    test_remote_free,
+	    test_basic,
+	    test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/mallocx.sh b/deps/jemalloc/test/integration/mallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# When jemalloc was built with --enable-fill, disable junk filling for this
+# test via MALLOC_CONF.
+if [ "x${enable_fill}" = "x1" ] ; then
+  export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c
new file mode 100644
index 0000000..748ebb6
--- /dev/null
+++ b/deps/jemalloc/test/integration/overflow.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+/*
+ * malloc()/calloc()/realloc() must all fail for requests larger than the
+ * maximum size class (looked up via the arenas.lextent MIB) or SIZE_T_MAX.
+ */
+TEST_BEGIN(test_overflow) {
+	unsigned nlextents;
+	size_t mib[4];
+	size_t sz, miblen, max_size_class;
+	void *p;
+
+	sz = sizeof(unsigned);
+	assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+	    0), 0, "Unexpected mallctl() error");
+
+	miblen = sizeof(mib) / sizeof(size_t);
+	assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() error");
+	/* Index of the largest size class. */
+	mib[2] = nlextents - 1;
+
+	sz = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+	    NULL, 0), 0, "Unexpected mallctlbymib() error");
+
+	assert_ptr_null(malloc(max_size_class + 1),
+	    "Expected OOM due to over-sized allocation request");
+	assert_ptr_null(malloc(SIZE_T_MAX),
+	    "Expected OOM due to over-sized allocation request");
+
+	assert_ptr_null(calloc(1, max_size_class + 1),
+	    "Expected OOM due to over-sized allocation request");
+	assert_ptr_null(calloc(1, SIZE_T_MAX),
+	    "Expected OOM due to over-sized allocation request");
+
+	/* realloc() failure must leave the original allocation valid. */
+	p = malloc(1);
+	assert_ptr_not_null(p, "Unexpected malloc() OOM");
+	assert_ptr_null(realloc(p, max_size_class + 1),
+	    "Expected OOM due to over-sized allocation request");
+	assert_ptr_null(realloc(p, SIZE_T_MAX),
+	    "Expected OOM due to over-sized allocation request");
+	free(p);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+/* Harness entry point. */
+int
+main(void) {
+	return test(
+	    test_overflow);
+}
diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c
new file mode 100644
index 0000000..d992260
--- /dev/null
+++ b/deps/jemalloc/test/integration/posix_memalign.c
@@ -0,0 +1,128 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+	/* Purge arena 0; see the rationale comment above. */
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl error");
+}
+
+/*
+ * posix_memalign() must reject alignments smaller than sizeof(void *) with
+ * EINVAL, and must fail for alignments that are not powers of two.
+ */
+TEST_BEGIN(test_alignment_errors) {
+	size_t alignment;
+	void *p;
+
+	for (alignment = 0; alignment < sizeof(void *); alignment++) {
+		assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+		    "Expected error for invalid alignment %zu",
+		    alignment);
+	}
+
+	/* alignment is a power of two here, so alignment + 1 is not. */
+	for (alignment = sizeof(size_t); alignment < MAXALIGN;
+	    alignment <<= 1) {
+		assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+		    "Expected error for invalid alignment %zu",
+		    alignment + 1);
+	}
+}
+TEST_END
+
+/*
+ * posix_memalign() must fail for size/alignment combinations that exceed
+ * the address space; constants are chosen per pointer width
+ * (LG_SIZEOF_PTR == 3 means 64-bit pointers).
+ */
+TEST_BEGIN(test_oom_errors) {
+	size_t alignment, size;
+	void *p;
+
+#if LG_SIZEOF_PTR == 3
+	alignment = UINT64_C(0x8000000000000000);
+	size = UINT64_C(0x8000000000000000);
+#else
+	alignment = 0x80000000LU;
+	size = 0x80000000LU;
+#endif
+	assert_d_ne(posix_memalign(&p, alignment, size), 0,
+	    "Expected error for posix_memalign(&p, %zu, %zu)",
+	    alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+	alignment = UINT64_C(0x4000000000000000);
+	size = UINT64_C(0xc000000000000001);
+#else
+	alignment = 0x40000000LU;
+	size = 0xc0000001LU;
+#endif
+	assert_d_ne(posix_memalign(&p, alignment, size), 0,
+	    "Expected error for posix_memalign(&p, %zu, %zu)",
+	    alignment, size);
+
+	/* Small alignment, but size near SIZE_T_MAX. */
+	alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+	size = UINT64_C(0xfffffffffffffff0);
+#else
+	size = 0xfffffff0LU;
+#endif
+	assert_d_ne(posix_memalign(&p, alignment, size), 0,
+	    "Expected error for posix_memalign(&p, %zu, %zu)",
+	    alignment, size);
+}
+TEST_END
+
+/*
+ * Exercise posix_memalign() across all power-of-two alignments up to
+ * MAXALIGN and a spread of sizes (including size 0), failing with a decoded
+ * errno message on any unexpected error.
+ */
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+	size_t alignment, size, total;
+	unsigned i;
+	int err;
+	void *ps[NITER];
+
+	for (i = 0; i < NITER; i++) {
+		ps[i] = NULL;
+	}
+
+	for (alignment = 8;
+	    alignment <= MAXALIGN;
+	    alignment <<= 1) {
+		total = 0;
+		/* size starts at 0, then steps by an alignment-derived stride. */
+		for (size = 0;
+		    size < 3 * alignment && size < (1U << 31);
+		    size += ((size == 0) ? 1 :
+		    (alignment >> (LG_SIZEOF_PTR-1)) - 1)) {
+			for (i = 0; i < NITER; i++) {
+				err = posix_memalign(&ps[i],
+				    alignment, size);
+				if (err) {
+					char buf[BUFERROR_BUF];
+
+					buferror(get_errno(), buf, sizeof(buf));
+					test_fail(
+					    "Error for alignment=%zu, "
+					    "size=%zu (%#zx): %s",
+					    alignment, size, size, buf);
+				}
+				total += malloc_usable_size(ps[i]);
+				/* Cap total live memory per size step. */
+				if (total >= (MAXALIGN << 1)) {
+					break;
+				}
+			}
+			for (i = 0; i < NITER; i++) {
+				if (ps[i] != NULL) {
+					free(ps[i]);
+					ps[i] = NULL;
+				}
+			}
+		}
+		purge();
+	}
+#undef NITER
+}
+TEST_END
+
+/* Harness entry point for the posix_memalign() integration tests. */
+int
+main(void) {
+	return test(
+	    test_alignment_errors,
+	    test_oom_errors,
+	    test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
new file mode 100644
index 0000000..08ed08d
--- /dev/null
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -0,0 +1,258 @@
+#include "test/jemalloc_test.h"
+
+/* Read an unsigned value (e.g. a size-class count) via mallctl. */
+static unsigned
+get_nsizes_impl(const char *cmd) {
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return ret;
+}
+
+/* Number of large size classes. */
+static unsigned
+get_nlarge(void) {
+	return get_nsizes_impl("arenas.nlextents");
+}
+
+/* Read entry |ind| of an indexed size-class list via a MIB lookup. */
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	/* Patch the index component of the MIB before reading. */
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+	return ret;
+}
+
+/* Size in bytes of the ind'th large size class. */
+static size_t
+get_large_size(size_t ind) {
+	return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * Repeatedly grow an allocation through the size classes via rallocx(),
+ * then shrink it back down, verifying usable sizes at each step.
+ */
+TEST_BEGIN(test_grow_and_shrink) {
+	void *p, *q;
+	size_t tsz;
+#define NCYCLES 3
+	unsigned i, j;
+#define NSZS 1024
+	size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+	p = mallocx(1, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+	szs[0] = sallocx(p, 0);
+
+	for (i = 0; i < NCYCLES; i++) {
+		/* Grow one size class at a time up to MAXSZ. */
+		for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+			q = rallocx(p, szs[j-1]+1, 0);
+			assert_ptr_not_null(q,
+			    "Unexpected rallocx() error for size=%zu-->%zu",
+			    szs[j-1], szs[j-1]+1);
+			szs[j] = sallocx(q, 0);
+			/*
+			 * The usable size must be at least the requested
+			 * size; assert_zu_ge pins what the failure message
+			 * states (assert_zu_ne would pass even if the
+			 * allocation had shrunk).
+			 */
+			assert_zu_ge(szs[j], szs[j-1]+1,
+			    "Expected size to be at least: %zu", szs[j-1]+1);
+			p = q;
+		}
+
+		/* Shrink back down, expecting exact earlier sizes. */
+		for (j--; j > 0; j--) {
+			q = rallocx(p, szs[j-1], 0);
+			assert_ptr_not_null(q,
+			    "Unexpected rallocx() error for size=%zu-->%zu",
+			    szs[j], szs[j-1]);
+			tsz = sallocx(q, 0);
+			assert_zu_eq(tsz, szs[j-1],
+			    "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+			p = q;
+		}
+	}
+
+	dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+/*
+ * Return true (and record a test failure) if any byte in
+ * [offset, offset+len) of p differs from c.
+ */
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
+	bool ret = false;
+	const uint8_t *buf = (const uint8_t *)p;
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		uint8_t b = buf[offset+i];
+		if (b != c) {
+			test_fail("Allocation at %p (len=%zu) contains %#x "
+			    "rather than %#x at offset %zu", p, len, b, c,
+			    offset+i);
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Verify MALLOCX_ZERO semantics across rallocx() growth: bytes carried over
+ * keep their fill pattern, and newly extended bytes arrive zeroed.
+ */
+TEST_BEGIN(test_zero) {
+	void *p, *q;
+	size_t psz, qsz, i, j;
+	size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+	for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+		size_t start_size = start_sizes[i];
+		p = mallocx(start_size, MALLOCX_ZERO);
+		assert_ptr_not_null(p, "Unexpected mallocx() error");
+		psz = sallocx(p, 0);
+
+		assert_false(validate_fill(p, 0, 0, psz),
+		    "Expected zeroed memory");
+		memset(p, FILL_BYTE, psz);
+		assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+		    "Expected filled memory");
+
+		for (j = 1; j < RANGE; j++) {
+			q = rallocx(p, start_size+j, MALLOCX_ZERO);
+			assert_ptr_not_null(q, "Unexpected rallocx() error");
+			qsz = sallocx(q, 0);
+			if (q != p || qsz != psz) {
+				assert_false(validate_fill(q, FILL_BYTE, 0,
+				    psz), "Expected filled memory");
+				assert_false(validate_fill(q, 0, psz, qsz-psz),
+				    "Expected zeroed memory");
+			}
+			if (psz != qsz) {
+				/* Refill the new tail for the next check. */
+				memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+				    qsz-psz);
+				psz = qsz;
+			}
+			p = q;
+		}
+		assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+		    "Expected filled memory");
+		dallocx(p, 0);
+	}
+/* Undefine RANGE as well; the original leaked it past this test. */
+#undef RANGE
+#undef FILL_BYTE
+}
+TEST_END
+
+/*
+ * rallocx() with MALLOCX_ALIGN must return suitably aligned pointers for
+ * every power-of-two alignment up to MAX_ALIGN.
+ */
+TEST_BEGIN(test_align) {
+	void *p, *q;
+	size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+	align = ZU(1);
+	p = mallocx(1, MALLOCX_ALIGN(align));
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+	for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+		q = rallocx(p, 1, MALLOCX_ALIGN(align));
+		assert_ptr_not_null(q,
+		    "Unexpected rallocx() error for align=%zu", align);
+		/* Low bits must be clear for the requested alignment. */
+		assert_ptr_null(
+		    (void *)((uintptr_t)q & (align-1)),
+		    "%p inadequately aligned for align=%zu",
+		    q, align);
+		p = q;
+	}
+	dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+/*
+ * rallocx() with MALLOCX_LG_ALIGN|MALLOCX_ZERO: alignment must hold, and
+ * memory must be zeroed.  For very large sizes only the first and last
+ * MAX_VALIDATE bytes are checked to keep runtime bounded.
+ */
+TEST_BEGIN(test_lg_align_and_zero) {
+	void *p, *q;
+	unsigned lg_align;
+	size_t sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+	lg_align = 0;
+	p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+	for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+		q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+		assert_ptr_not_null(q,
+		    "Unexpected rallocx() error for lg_align=%u", lg_align);
+		assert_ptr_null(
+		    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+		    "%p inadequately aligned for lg_align=%u", q, lg_align);
+		sz = sallocx(q, 0);
+		if ((sz << 1) <= MAX_VALIDATE) {
+			assert_false(validate_fill(q, 0, 0, sz),
+			    "Expected zeroed memory");
+		} else {
+			/* Spot-check head and tail of large allocations. */
+			assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+			    "Expected zeroed memory");
+			assert_false(validate_fill(
+			    (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+			    0, 0, MAX_VALIDATE), "Expected zeroed memory");
+		}
+		p = q;
+	}
+	dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+/*
+ * rallocx() must fail for sizes beyond the largest size class, beyond
+ * PTRDIFF_MAX, at SIZE_T_MAX, and for an over-sized alignment, while
+ * leaving the original allocation valid.
+ */
+TEST_BEGIN(test_overflow) {
+	size_t largemax;
+	void *p;
+
+	largemax = get_large_size(get_nlarge()-1);
+
+	p = mallocx(1, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+	assert_ptr_null(rallocx(p, largemax+1, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
+
+	assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+	assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+	    "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+	assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+	    "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+	    ZU(PTRDIFF_MAX)+1);
+
+	dallocx(p, 0);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+/* Harness entry point for the rallocx() integration tests. */
+int
+main(void) {
+	return test(
+	    test_grow_and_shrink,
+	    test_zero,
+	    test_align,
+	    test_lg_align_and_zero,
+	    test_overflow);
+}
diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c
new file mode 100644
index 0000000..ca01448
--- /dev/null
+++ b/deps/jemalloc/test/integration/sdallocx.c
@@ -0,0 +1,55 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 22)
+#define NITER 3
+
+/* Smoke test: sized deallocation of a simple mallocx() allocation. */
+TEST_BEGIN(test_basic) {
+	void *ptr = mallocx(64, 0);
+	/* Don't pass NULL to sdallocx() if allocation unexpectedly failed. */
+	assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
+	sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+/*
+ * Allocate with MALLOCX_ALIGN|MALLOCX_ZERO across alignments and sizes,
+ * then free each with sdallocx() passing the same size/alignment flags.
+ * No assertions here beyond not crashing; correctness of the sizes is
+ * covered by the mallocx tests.
+ */
+TEST_BEGIN(test_alignment_and_size) {
+	size_t nsz, sz, alignment, total;
+	unsigned i;
+	void *ps[NITER];
+
+	for (i = 0; i < NITER; i++) {
+		ps[i] = NULL;
+	}
+
+	for (alignment = 8;
+	    alignment <= MAXALIGN;
+	    alignment <<= 1) {
+		total = 0;
+		for (sz = 1;
+		    sz < 3 * alignment && sz < (1U << 31);
+		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+			for (i = 0; i < NITER; i++) {
+				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+				    MALLOCX_ZERO);
+				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+				    MALLOCX_ZERO);
+				total += nsz;
+				/* Cap total live memory per size step. */
+				if (total >= (MAXALIGN << 1)) {
+					break;
+				}
+			}
+			for (i = 0; i < NITER; i++) {
+				if (ps[i] != NULL) {
+					/* Sized free with matching flags. */
+					sdallocx(ps[i], sz,
+					    MALLOCX_ALIGN(alignment));
+					ps[i] = NULL;
+				}
+			}
+		}
+	}
+}
+TEST_END
+
+/* Harness entry point; reentrancy checking is disabled for these tests. */
+int
+main(void) {
+	return test_no_reentrancy(
+	    test_basic,
+	    test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/slab_sizes.c b/deps/jemalloc/test/integration/slab_sizes.c
new file mode 100644
index 0000000..af250c3
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */
+
+/*
+ * Walk every bin and check that its slab size honors the slab_sizes
+ * overrides set in slab_sizes.sh ("slab_sizes:1-4096:17|100-200:1|128-128:2"):
+ * 17-page slabs for 1-4096 byte bins, 1-page for 100-200, 2-page for 128.
+ */
+TEST_BEGIN(test_slab_sizes) {
+	unsigned nbins;
+	size_t page;
+	size_t sizemib[4];
+	size_t slabmib[4];
+	size_t len;
+
+	len = sizeof(nbins);
+	assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+	    "nbins mallctl failure");
+
+	len = sizeof(page);
+	assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
+	    "page mallctl failure");
+
+	len = 4;
+	assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
+	    "bin size mallctlnametomib failure");
+
+	len = 4;
+	assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
+	    0, "slab size mallctlnametomib failure");
+
+	size_t biggest_slab_seen = 0;
+
+	for (unsigned i = 0; i < nbins; i++) {
+		size_t bin_size;
+		size_t slab_size;
+		len = sizeof(size_t);
+		/* Index the MIBs to bin i before reading. */
+		sizemib[2] = i;
+		slabmib[2] = i;
+		assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
+		    NULL, 0), 0, "bin size mallctlbymib failure");
+
+		len = sizeof(size_t);
+		assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
+		    NULL, 0), 0, "slab size mallctlbymib failure");
+
+		if (bin_size < 100) {
+			/*
+			 * Then we should be as close to 17 as possible. Since
+			 * not all page sizes are valid (because of bitmap
+			 * limitations on the number of items in a slab), we
+			 * should at least make sure that the number of pages
+			 * goes up.
+			 */
+			assert_zu_ge(slab_size, biggest_slab_seen,
+			    "Slab sizes should go up");
+			biggest_slab_seen = slab_size;
+		} else if (
+		    (100 <= bin_size && bin_size < 128)
+		    || (128 < bin_size && bin_size <= 200)) {
+			assert_zu_eq(slab_size, page,
+			    "Forced-small slabs should be small");
+		} else if (bin_size == 128) {
+			/* The 128-byte bin has its own 2-page override. */
+			assert_zu_eq(slab_size, 2 * page,
+			    "Forced-2-page slab should be 2 pages");
+		} else if (200 < bin_size && bin_size <= 4096) {
+			assert_zu_ge(slab_size, biggest_slab_seen,
+			    "Slab sizes should go up");
+			biggest_slab_seen = slab_size;
+		}
+	}
+	/*
+	 * For any reasonable configuration, 17 pages should be a valid slab
+	 * size for 4096-byte items.
+	 */
+	assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
+}
+TEST_END
+
+/* Harness entry point. */
+int
+main(void) {
+	return test(
+	    test_slab_sizes);
+}
diff --git a/deps/jemalloc/test/integration/slab_sizes.sh b/deps/jemalloc/test/integration/slab_sizes.sh
new file mode 100644
index 0000000..07e3db8
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Some screwy-looking slab sizes.
+export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
diff --git a/deps/jemalloc/test/integration/smallocx.c b/deps/jemalloc/test/integration/smallocx.c
new file mode 100644
index 0000000..2486752
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.c
@@ -0,0 +1,312 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/jemalloc_macros.h"
+
+/* Stringification helpers. */
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+#ifndef JEMALLOC_VERSION_GID_IDENT
+	#error "JEMALLOC_VERSION_GID_IDENT not defined"
+#endif
+
+/*
+ * The experimental smallocx symbol embeds the git version identifier in
+ * its name; reconstruct it by token-pasting JEMALLOC_VERSION_GID_IDENT.
+ */
+#define JOIN(x, y) x ## y
+#define JOIN2(x, y) JOIN(x, y)
+#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
+
+/* smallocx returns both the pointer and its usable size. */
+typedef struct {
+	void *ptr;
+	size_t size;
+} smallocx_return_t;
+
+extern smallocx_return_t
+smallocx(size_t size, int flags);
+
+/* Read an unsigned value (e.g. a size-class count) via mallctl. */
+static unsigned
+get_nsizes_impl(const char *cmd) {
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return ret;
+}
+
+/* Number of large size classes. */
+static unsigned
+get_nlarge(void) {
+	return get_nsizes_impl("arenas.nlextents");
+}
+
+/* Read entry |ind| of an indexed size-class list via a MIB lookup. */
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	/* Patch the index component of the MIB before reading. */
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+	return ret;
+}
+
+/* Size in bytes of the ind'th large size class. */
+static size_t
+get_large_size(size_t ind) {
+	return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+	/* Purge arena 0; see the rationale comment above. */
+	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+/*
+ * smallocx() must return a NULL .ptr for requests beyond the largest size
+ * class, beyond PTRDIFF_MAX, at SIZE_T_MAX, and for over-sized alignment.
+ */
+TEST_BEGIN(test_overflow) {
+	size_t largemax;
+
+	largemax = get_large_size(get_nlarge()-1);
+
+	assert_ptr_null(smallocx(largemax+1, 0).ptr,
+	    "Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
+
+	assert_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
+	    "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+	assert_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
+	    "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+	assert_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
+	    "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
+	    ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+/*
+ * Thread body: create a new arena, allocate one large object from it with
+ * smallocx() (bypassing the tcache), check the reported size against
+ * nallocx(), and hand the pointer back through arg.
+ */
+static void *
+remote_alloc(void *arg) {
+	unsigned arena;
+	size_t sz = sizeof(unsigned);
+	assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+	    "Unexpected mallctl() failure");
+	size_t large_sz;
+	sz = sizeof(size_t);
+	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+	    NULL, 0), 0, "Unexpected mallctl failure");
+
+	smallocx_return_t r
+	    = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+	void *ptr = r.ptr;
+	assert_zu_eq(r.size,
+	    nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
+	    "Expected smalloc(size,flags).size == nallocx(size,flags)");
+	void **ret = (void **)arg;
+	*ret = ptr;
+
+	return NULL;
+}
+
+/* Free, from the main thread, an allocation made by a worker thread. */
+TEST_BEGIN(test_remote_free) {
+	thd_t thd;
+	void *ret;
+	thd_create(&thd, remote_alloc, (void *)&ret);
+	thd_join(thd, NULL);
+	assert_ptr_not_null(ret, "Unexpected smallocx failure");
+
+	/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+	dallocx(ret, 0);
+	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+/*
+ * Repeated near-address-space-sized smallocx() calls must eventually fail,
+ * and pointer-width-specific huge size/alignment combinations must fail.
+ */
+TEST_BEGIN(test_oom) {
+	size_t largemax;
+	bool oom;
+	void *ptrs[3];
+	unsigned i;
+
+	/*
+	 * It should be impossible to allocate three objects that each consume
+	 * nearly half the virtual address space.
+	 */
+	largemax = get_large_size(get_nlarge()-1);
+	oom = false;
+	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+		ptrs[i] = smallocx(largemax, 0).ptr;
+		if (ptrs[i] == NULL) {
+			oom = true;
+		}
+	}
+	assert_true(oom,
+	    "Expected OOM during series of calls to smallocx(size=%zu, 0)",
+	    largemax);
+	/* Release whatever did succeed, then purge the dirty pages. */
+	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+		if (ptrs[i] != NULL) {
+			dallocx(ptrs[i], 0);
+		}
+	}
+	purge();
+
+	/* LG_SIZEOF_PTR == 3 selects the 64-bit overflow cases. */
+#if LG_SIZEOF_PTR == 3
+	assert_ptr_null(smallocx(0x8000000000000000ULL,
+	    MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
+	    "Expected OOM for smallocx()");
+	assert_ptr_null(smallocx(0x8000000000000000ULL,
+	    MALLOCX_ALIGN(0x80000000)).ptr,
+	    "Expected OOM for smallocx()");
+#else
+	assert_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
+	    "Expected OOM for smallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+/*
+ * Step through the size classes and check that nallocx(), smallocx(), and
+ * sallocx() agree on usable sizes, with and without MALLOCX_ZERO.
+ */
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+	size_t sz;
+
+	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+		smallocx_return_t ret;
+		size_t nsz, rsz, smz;
+		void *p;
+		nsz = nallocx(sz, 0);
+		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+		ret = smallocx(sz, 0);
+		p = ret.ptr;
+		smz = ret.size;
+		assert_ptr_not_null(p,
+		    "Unexpected smallocx(size=%zx, flags=0) error", sz);
+		rsz = sallocx(p, 0);
+		assert_zu_ge(rsz, sz, "Real size smaller than expected");
+		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+		assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+		dallocx(p, 0);
+
+		ret = smallocx(sz, 0);
+		p = ret.ptr;
+		smz = ret.size;
+		assert_ptr_not_null(p,
+		    "Unexpected smallocx(size=%zx, flags=0) error", sz);
+		dallocx(p, 0);
+
+		nsz = nallocx(sz, MALLOCX_ZERO);
+		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+		/*
+		 * NOTE(review): this checks smz from the *previous*
+		 * smallocx() call; the MALLOCX_ZERO allocation that refreshes
+		 * ret happens below.  Possibly intended to follow it.
+		 */
+		assert_zu_ne(smz, 0, "Unexpected smallocx() error");
+		ret = smallocx(sz, MALLOCX_ZERO);
+		p = ret.ptr;
+		assert_ptr_not_null(p,
+		    "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
+		    nsz);
+		rsz = sallocx(p, 0);
+		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+		assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+		dallocx(p, 0);
+		purge();
+	}
+#undef MAXSZ
+}
+TEST_END
+
+/*
+ * For every power-of-two alignment up to MAXALIGN and a spread of sizes,
+ * verify that nallocx()/smallocx()/sallocx() agree and that the returned
+ * pointers satisfy the requested alignment.
+ */
+TEST_BEGIN(test_alignment_and_size) {
+	const char *percpu_arena;
+	size_t sz = sizeof(percpu_arena);
+
+	/*
+	 * NOTE(review): test_skip() here does not return from the test, so
+	 * execution falls through; also note the stray ';' after the closing
+	 * brace.  Matches the code as shipped -- confirm against upstream.
+	 */
+	if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+	    strcmp(percpu_arena, "disabled") != 0) {
+		test_skip("test_alignment_and_size skipped: "
+		    "not working with percpu arena.");
+	};
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+	size_t nsz, rsz, smz, alignment, total;
+	unsigned i;
+	void *ps[NITER];
+
+	for (i = 0; i < NITER; i++) {
+		ps[i] = NULL;
+	}
+
+	for (alignment = 8;
+	    alignment <= MAXALIGN;
+	    alignment <<= 1) {
+		total = 0;
+		for (sz = 1;
+		    sz < 3 * alignment && sz < (1U << 31);
+		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+			for (i = 0; i < NITER; i++) {
+				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+				    MALLOCX_ZERO);
+				assert_zu_ne(nsz, 0,
+				    "nallocx() error for alignment=%zu, "
+				    "size=%zu (%#zx)", alignment, sz, sz);
+				smallocx_return_t ret
+				    = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
+				ps[i] = ret.ptr;
+				assert_ptr_not_null(ps[i],
+				    "smallocx() error for alignment=%zu, "
+				    "size=%zu (%#zx)", alignment, sz, sz);
+				rsz = sallocx(ps[i], 0);
+				smz = ret.size;
+				assert_zu_ge(rsz, sz,
+				    "Real size smaller than expected for "
+				    "alignment=%zu, size=%zu", alignment, sz);
+				assert_zu_eq(nsz, rsz,
+				    "nallocx()/sallocx() size mismatch for "
+				    "alignment=%zu, size=%zu", alignment, sz);
+				assert_zu_eq(nsz, smz,
+				    "nallocx()/smallocx() size mismatch for "
+				    "alignment=%zu, size=%zu", alignment, sz);
+				/* Low bits must be clear for the alignment. */
+				assert_ptr_null(
+				    (void *)((uintptr_t)ps[i] & (alignment-1)),
+				    "%p inadequately aligned for"
+				    " alignment=%zu, size=%zu", ps[i],
+				    alignment, sz);
+				total += rsz;
+				/* Cap total live memory per size step. */
+				if (total >= (MAXALIGN << 1)) {
+					break;
+				}
+			}
+			for (i = 0; i < NITER; i++) {
+				if (ps[i] != NULL) {
+					dallocx(ps[i], 0);
+					ps[i] = NULL;
+				}
+			}
+		}
+		purge();
+	}
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+/* Harness entry point for the smallocx() integration tests. */
+int
+main(void) {
+	return test(
+	    test_overflow,
+	    test_oom,
+	    test_remote_free,
+	    test_basic,
+	    test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/smallocx.sh b/deps/jemalloc/test/integration/smallocx.sh
new file mode 100644
index 0000000..d07f10f
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.sh
@@ -0,0 +1,5 @@
#!/bin/sh

# When fill support is compiled in, turn junk filling off so that the
# MALLOCX_ZERO checks in smallocx.c see untouched zeroed memory.
case "${enable_fill}" in
1) export MALLOC_CONF="junk:false" ;;
esac
diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c
new file mode 100644
index 0000000..1e5ec05
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_arena.c
@@ -0,0 +1,86 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+thd_start(void *arg) {
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
+ (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
+ 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ assert_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return NULL;
+}
+
+static void
+mallctl_failure(int err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+}
+
+TEST_BEGIN(test_thread_arena) {
+ void *p;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+
+ unsigned arena_ind, old_arena_ind;
+ size_t sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Arena creation failure");
+
+ size_t size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size,
+ (void *)&arena_ind, sizeof(arena_ind))) != 0) {
+ mallctl_failure(err);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ assert_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+ free(p);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_thread_arena);
+}
diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c
new file mode 100644
index 0000000..95c9acc
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,87 @@
+#include "test/jemalloc_test.h"
+
+void *
+thd_start(void *arg) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ if (e0) {
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
new file mode 100644
index 0000000..cd0ca04
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -0,0 +1,384 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation e.g. by heap profiling can't interpose allocations where
+ * xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void) {
+ static unsigned ind = 0;
+
+ if (ind == 0) {
+ size_t sz = sizeof(ind);
+ assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure creating arena");
+ }
+
+ return ind;
+}
+
+TEST_BEGIN(test_same_size) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nsmall(void) {
+ return get_nsizes_impl("arenas.nbins");
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_small_size(size_t ind) {
+ return get_size_impl("arenas.bin.0.size", ind);
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+TEST_BEGIN(test_size) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test smallest supported size. */
+ assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ assert_zu_le(xallocx(p, largemax, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, largemax, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that largemax-size underflows. */
+ assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small) {
+ size_t small0, small1, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large) {
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t smallmax, large1, large2, large3, largemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ large3 = get_large_size(3);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(large3, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, smallmax, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_le(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, largemax+1, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size+extra overflow. */
+ assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
/*
 * Dump the contents of buffer p as runs of identical byte values, for
 * diagnosing fill/zero validation failures.
 */
static void
print_filled_extents(const void *p, uint8_t c, size_t len) {
    const uint8_t *pc = (const uint8_t *)p;
    size_t i, range0;
    uint8_t c0;

    malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
    range0 = 0;
    c0 = pc[0];
    for (i = 0; i < len; i++) {
        if (pc[i] != c0) {
            malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
            range0 = i;
            c0 = pc[i];
        }
    }
    malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
}

/*
 * Return true (and dump the buffer) iff any byte in
 * [offset, offset+len) of p differs from c.
 */
static bool
validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
    const uint8_t *pc = (const uint8_t *)p;
    bool err;
    size_t i;

    for (i = offset, err = false; i < offset+len; i++) {
        if (pc[i] != c) {
            err = true;
            /* One mismatch decides the result; stop scanning. */
            break;
        }
    }

    if (err) {
        print_filled_extents(p, c, offset + len);
    }

    return err;
}
+
+static void
+test_zero(size_t szmin, size_t szmax) {
+ int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ if (xallocx(p, sz, 0, flags) != sz) {
+ p = rallocx(p, sz, flags);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, flags);
+ if (xallocx(p, sz+1, 0, flags) != nsz) {
+ p = rallocx(p, sz+1, flags);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large) {
+ size_t large0, large1;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+
+ test_zero(large1, large0 * 2);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_zero_large);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.sh b/deps/jemalloc/test/integration/xallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.sh
@@ -0,0 +1,5 @@
#!/bin/sh

# When fill support is compiled in, turn junk filling off so that the
# MALLOCX_ZERO checks in xallocx.c see untouched zeroed memory.
case "${enable_fill}" in
1) export MALLOC_CONF="junk:false" ;;
esac
diff --git a/deps/jemalloc/test/src/SFMT.c b/deps/jemalloc/test/src/SFMT.c
new file mode 100644
index 0000000..c05e218
--- /dev/null
+++ b/deps/jemalloc/test/src/SFMT.c
@@ -0,0 +1,719 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.c
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+#define SFMT_C_
+#include "test/jemalloc_test.h"
+#include "test/SFMT-params.h"
+
+#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(ONLY64) && !defined(BIG_ENDIAN64)
+ #if defined(__GNUC__)
+ #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
+ #endif
+#undef ONLY64
+#endif
+/*------------------------------------------------------
+ 128-bit SIMD data type for Altivec, SSE2 or standard C
+ ------------------------------------------------------*/
+#if defined(HAVE_ALTIVEC)
+/** 128-bit data structure */
+union W128_T {
+ vector unsigned int s;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#elif defined(HAVE_SSE2)
+/** 128-bit data structure */
+union W128_T {
+ __m128i si;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#else
+
+/** 128-bit data structure */
+struct W128_T {
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef struct W128_T w128_t;
+
+#endif
+
+struct sfmt_s {
+ /** the 128-bit internal state array */
+ w128_t sfmt[N];
+ /** index counter to the 32-bit internal state array */
+ int idx;
+ /** a flag: it is 0 if and only if the internal state is not yet
+ * initialized. */
+ int initialized;
+};
+
+/*--------------------------------------
+ FILE GLOBAL VARIABLES
+ internal state, index counter and flag
+ --------------------------------------*/
+
+/** a parity check vector which certificate the period of 2^{MEXP} */
+static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
+
+/*----------------
+ STATIC FUNCTIONS
+ ----------------*/
+static inline int idxof(int i);
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+static inline void rshift128(w128_t *out, w128_t const *in, int shift);
+static inline void lshift128(w128_t *out, w128_t const *in, int shift);
+#endif
+static inline void gen_rand_all(sfmt_t *ctx);
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
+static inline uint32_t func1(uint32_t x);
+static inline uint32_t func2(uint32_t x);
+static void period_certification(sfmt_t *ctx);
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+static inline void swap(w128_t *array, int size);
+#endif
+
+#if defined(HAVE_ALTIVEC)
+ #include "test/SFMT-alti.h"
+#elif defined(HAVE_SSE2)
+ #include "test/SFMT-sse2.h"
+#endif
+
/**
 * Map a 32-bit word index into the state array.  ONLY64 big-endian
 * builds simulate a little-endian 64-bit layout by swapping adjacent
 * word pairs; otherwise the mapping is the identity.
 */
#ifdef ONLY64
static inline int idxof(int i) {
    return i ^ 1;
}
#else
static inline int idxof(int i) {
    return i;
}
#endif
+/**
+ * This function simulates SIMD 128-bit right shift by the standard C.
+ * The 128-bit integer given in in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function simulates SIMD 128-bit left shift by the standard C.
+ * The 128-bit integer given in in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+#endif
+
+/**
+ * This function represents the recursion formula.
+ * @param r output
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#else
+static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#endif
+#endif
+
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+static inline void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array an 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < size - N; i++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j] = array[j + size - N];
+ }
+ for (; i < size; i++, j++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ ctx->sfmt[j] = array[i];
+ }
+}
+#endif
+
#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
/* Exchange adjacent 32-bit words within each 128-bit block. */
static inline void swap(w128_t *array, int size) {
    int i;
    uint32_t w0, w2;

    for (i = 0; i < size; i++) {
        w0 = array[i].u[0];
        w2 = array[i].u[2];
        array[i].u[0] = array[i].u[1];
        array[i].u[2] = array[i].u[3];
        array[i].u[1] = w0;
        array[i].u[3] = w2;
    }
}
#endif
+/**
+ * This function represents a function used in the initialization
+ * by init_by_array
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
/*
 * Scrambling functions used by init_by_array-style seeding: a
 * xor-shift followed by multiplication with a fixed odd constant
 * (constants from the reference SFMT 1.3.3 implementation).
 * @param x 32-bit integer
 * @return 32-bit integer
 */
static uint32_t func1(uint32_t x) {
    uint32_t mixed = x ^ (x >> 27);
    return mixed * UINT32_C(1664525);
}

static uint32_t func2(uint32_t x) {
    uint32_t mixed = x ^ (x >> 27);
    return mixed * UINT32_C(1566083941);
}
+
+/**
+ * This function certificate the period of 2^{MEXP}
+ */
+static void period_certification(sfmt_t *ctx) {
+ int inner = 0;
+ int i, j;
+ uint32_t work;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ for (i = 0; i < 4; i++)
+ inner ^= psfmt32[idxof(i)] & parity[i];
+ for (i = 16; i > 0; i >>= 1)
+ inner ^= inner >> i;
+ inner &= 1;
+ /* check OK */
+ if (inner == 1) {
+ return;
+ }
+ /* check NG, and modification */
+ for (i = 0; i < 4; i++) {
+ work = 1;
+ for (j = 0; j < 32; j++) {
+ if ((work & parity[i]) != 0) {
+ psfmt32[idxof(i)] ^= work;
+ return;
+ }
+ work = work << 1;
+ }
+ }
+}
+
+/*----------------
+ PUBLIC FUNCTIONS
+ ----------------*/
+/**
+ * This function returns the identification string.
+ * The string shows the word size, the Mersenne exponent,
+ * and all parameters of this generator.
+ */
+const char *get_idstring(void) {
+ return IDSTR;
+}
+
+/**
+ * This function returns the minimum size of array used for \b
+ * fill_array32() function.
+ * @return minimum size of array used for fill_array32() function.
+ */
+int get_min_array_size32(void) {
+ return N32;
+}
+
+/**
+ * This function returns the minimum size of array used for \b
+ * fill_array64() function.
+ * @return minimum size of array used for fill_array64() function.
+ */
+int get_min_array_size64(void) {
+ return N64;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates and returns 32-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * @return 32-bit pseudorandom number
+ */
+uint32_t gen_rand32(sfmt_t *ctx) {
+ uint32_t r;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ assert(ctx->initialized);
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+ r = psfmt32[ctx->idx++];
+ return r;
+}
+
+/* Generate a random integer in [0..limit). */
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
+ uint32_t ret, above;
+
+ above = 0xffffffffU - (0xffffffffU % limit);
+ while (1) {
+ ret = gen_rand32(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+/**
+ * This function generates and returns 64-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * The function gen_rand64 should not be called after gen_rand32,
+ * unless an initialization is again executed.
+ * @return 64-bit pseudorandom number
+ */
+uint64_t gen_rand64(sfmt_t *ctx) {
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ uint32_t r1, r2;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+#else
+ uint64_t r;
+ uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
+#endif
+
+ assert(ctx->initialized);
+ assert(ctx->idx % 2 == 0);
+
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ r1 = psfmt32[ctx->idx];
+ r2 = psfmt32[ctx->idx + 1];
+ ctx->idx += 2;
+ return ((uint64_t)r2 << 32) | r1;
+#else
+ r = psfmt64[ctx->idx / 2];
+ ctx->idx += 2;
+ return r;
+#endif
+}
+
+/* Generate a random integer in [0..limit). */
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
+ uint64_t ret, above;
+
+ above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
+ while (1) {
+ ret = gen_rand64(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates pseudorandom 32-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 624 and a
+ * multiple of four. The generation by this function is much faster
+ * than the following gen_rand function.
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function can not be
+ * used after calling gen_rand function, without initialization.
+ *
+ * @param array an array where pseudorandom 32-bit integers are filled
+ * by this function. The pointer to the array must be \b "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 32-bit pseudorandom integers to be
+ * generated. size must be a multiple of 4, and greater than or equal
+ * to (MEXP / 128 + 1) * 4.
+ *
+ * @note \b memalign or \b posix_memalign is available to get aligned
+ * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
+ * returns the pointer to the aligned memory block.
+ */
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 4 == 0);
+ assert(size >= N32);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 4);
+ ctx->idx = N32;
+}
+#endif
+
+/**
+ * This function generates pseudorandom 64-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 312 and a
+ * multiple of two. The generation by this function is much faster
+ * than the following gen_rand function.
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function can not be
+ * used after calling gen_rand function, without initialization.
+ *
+ * @param array an array where pseudorandom 64-bit integers are filled
+ * by this function. The pointer to the array must be "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 64-bit pseudorandom integers to be
+ * generated. size must be a multiple of 2, and greater than or equal
+ * to (MEXP / 128 + 1) * 2
+ *
+ * @note \b memalign or \b posix_memalign is available to get aligned
+ * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
+ * returns the pointer to the aligned memory block.
+ */
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
+ /* Bulk generation only works from a freshly-stirred state (idx == N32). */
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ /* size counts 64-bit words: multiple of 2, at least N64 (see comment above). */
+ assert(size % 2 == 0);
+ assert(size >= N64);
+
+ /* Each 128-bit block yields two 64-bit outputs. */
+ gen_rand_array(ctx, (w128_t *)array, size / 2);
+ ctx->idx = N32;
+
+ /* Big-endian builds store 32-bit halves swapped; fix up in place. */
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ swap((w128_t *)array, size /2);
+#endif
+}
+
+/**
+ * This function initializes the internal state array with a 32-bit
+ * integer seed.
+ *
+ * @param seed a 32-bit integer used as the seed.
+ * @return a newly allocated, initialized context, or NULL if allocation
+ * fails. Release with fini_gen_rand().
+ */
+sfmt_t *init_gen_rand(uint32_t seed) {
+ void *p;
+ sfmt_t *ctx;
+ int i;
+ uint32_t *psfmt32;
+
+ /* The state must be 128-bit aligned for the SIMD code paths. */
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ /* Standard Mersenne Twister-style seeding recurrence. */
+ psfmt32[idxof(0)] = seed;
+ for (i = 1; i < N32; i++) {
+ psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
+ ^ (psfmt32[idxof(i - 1)] >> 30))
+ + i;
+ }
+ ctx->idx = N32;
+ /* Ensure the state has full period (non-degenerate seed). */
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+/**
+ * This function initializes the internal state array,
+ * with an array of 32-bit integers used as the seeds.
+ * @param init_key the array of 32-bit integers, used as a seed.
+ * @param key_length the length of init_key.
+ * @return a newly allocated, initialized context, or NULL if allocation
+ * fails. Release with fini_gen_rand().
+ */
+sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
+ void *p;
+ sfmt_t *ctx;
+ int i, j, count;
+ uint32_t r;
+ int lag;
+ int mid;
+ int size = N * 4;
+ uint32_t *psfmt32;
+
+ /* The state must be 128-bit aligned for the SIMD code paths. */
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ /* Lag/mid constants per the reference SFMT array-seeding algorithm. */
+ if (size >= 623) {
+ lag = 11;
+ } else if (size >= 68) {
+ lag = 7;
+ } else if (size >= 39) {
+ lag = 5;
+ } else {
+ lag = 3;
+ }
+ mid = (size - lag) / 2;
+
+ /* Fill with a nonzero pattern so a short key still stirs every word. */
+ memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
+ if (key_length + 1 > N32) {
+ count = key_length + 1;
+ } else {
+ count = N32;
+ }
+ r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
+ ^ psfmt32[idxof(N32 - 1)]);
+ psfmt32[idxof(mid)] += r;
+ r += key_length;
+ psfmt32[idxof(mid + lag)] += r;
+ psfmt32[idxof(0)] = r;
+
+ count--;
+ /* Pass 1a: mix in the caller-provided key material. */
+ for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += init_key[j] + i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ /* Pass 1b: continue mixing once the key is exhausted. */
+ for (; j < count; j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ /* Pass 2: diffuse every state word once more with func2(). */
+ for (j = 0; j < N32; j++) {
+ r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ + psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] ^= r;
+ r -= i;
+ psfmt32[idxof((i + mid + lag) % N32)] ^= r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+
+ ctx->idx = N32;
+ /* Ensure the state has full period (non-degenerate seed). */
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+/* Release a context allocated by init_gen_rand() or init_by_array(). */
+void fini_gen_rand(sfmt_t *ctx) {
+ assert(ctx != NULL);
+
+ /* Mark uninitialized first so stale use trips the asserts above. */
+ ctx->initialized = 0;
+ free(ctx);
+}
diff --git a/deps/jemalloc/test/src/btalloc.c b/deps/jemalloc/test/src/btalloc.c
new file mode 100644
index 0000000..d570952
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc.c
@@ -0,0 +1,6 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Allocate size bytes via a call chain whose shape is determined by bits;
+ * entry point into the btalloc_<n> chain generated by btalloc_n_gen().
+ */
+void *
+btalloc(size_t size, unsigned bits) {
+ return btalloc_0(size, bits);
+}
diff --git a/deps/jemalloc/test/src/btalloc_0.c b/deps/jemalloc/test/src/btalloc_0.c
new file mode 100644
index 0000000..77d8904
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_0.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+/* Instantiate btalloc_0() via the btalloc_n_gen() macro (see test/btalloc.h). */
+btalloc_n_gen(0)
diff --git a/deps/jemalloc/test/src/btalloc_1.c b/deps/jemalloc/test/src/btalloc_1.c
new file mode 100644
index 0000000..4c126c3
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_1.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+/* Instantiate btalloc_1() via the btalloc_n_gen() macro (see test/btalloc.h). */
+btalloc_n_gen(1)
diff --git a/deps/jemalloc/test/src/math.c b/deps/jemalloc/test/src/math.c
new file mode 100644
index 0000000..1758c67
--- /dev/null
+++ b/deps/jemalloc/test/src/math.c
@@ -0,0 +1,2 @@
+#define MATH_C_
+#include "test/jemalloc_test.h"
diff --git a/deps/jemalloc/test/src/mq.c b/deps/jemalloc/test/src/mq.c
new file mode 100644
index 0000000..9b5f672
--- /dev/null
+++ b/deps/jemalloc/test/src/mq.c
@@ -0,0 +1,27 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Sleep for approximately ns nanoseconds. Neither a lower nor an upper bound
+ * on the actual sleep time is guaranteed.
+ */
+void
+mq_nanosleep(unsigned ns) {
+ assert(ns <= 1000*1000*1000);
+
+#ifdef _WIN32
+ /*
+ * NOTE(review): Sleep() takes milliseconds but ns/1000 is microseconds,
+ * so this oversleeps by ~1000x on Windows; tolerated per the contract
+ * above, but worth confirming against upstream intent.
+ */
+ Sleep(ns / 1000);
+#else
+ {
+ struct timespec timeout;
+
+ /* Clamp to the nanosleep() requirement that tv_nsec < 1e9. */
+ if (ns < 1000*1000*1000) {
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = ns;
+ } else {
+ timeout.tv_sec = 1;
+ timeout.tv_nsec = 0;
+ }
+ /* Early wakeup (EINTR) is deliberately ignored. */
+ nanosleep(&timeout, NULL);
+ }
+#endif
+}
diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c
new file mode 100644
index 0000000..d9ce375
--- /dev/null
+++ b/deps/jemalloc/test/src/mtx.c
@@ -0,0 +1,61 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
+/*
+ * Initialize a test mutex. Returns true on failure, false on success
+ * (jemalloc's usual bool-error convention).
+ */
+bool
+mtx_init(mtx_t *mtx) {
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
+ _CRT_SPINCOUNT)) {
+ return true;
+ }
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ /* Static initializer; this path cannot fail. */
+ mtx->lock = OS_UNFAIR_LOCK_INIT;
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return true;
+ }
+ /* NOTE(review): settype() return value is ignored; DEFAULT rarely fails. */
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
+ if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return true;
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ return false;
+}
+
+/*
+ * Tear down a mutex created with mtx_init().
+ * NOTE(review): the Win32 branch omits DeleteCriticalSection(); harmless
+ * for short-lived test processes, but technically a resource leak.
+ */
+void
+mtx_fini(mtx_t *mtx) {
+#ifdef _WIN32
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+#else
+ pthread_mutex_destroy(&mtx->lock);
+#endif
+}
+
+/* Acquire the mutex (blocking); errors from the platform call are ignored. */
+void
+mtx_lock(mtx_t *mtx) {
+#ifdef _WIN32
+ EnterCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_lock(&mtx->lock);
+#else
+ pthread_mutex_lock(&mtx->lock);
+#endif
+}
+
+/* Release the mutex; must be held by the calling thread. */
+void
+mtx_unlock(mtx_t *mtx) {
+#ifdef _WIN32
+ LeaveCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ os_unfair_lock_unlock(&mtx->lock);
+#else
+ pthread_mutex_unlock(&mtx->lock);
+#endif
+}
diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c
new file mode 100644
index 0000000..f97ce4d
--- /dev/null
+++ b/deps/jemalloc/test/src/test.c
@@ -0,0 +1,234 @@
+#include "test/jemalloc_test.h"
+
+/* Test status state. */
+
+static unsigned test_count = 0;
+static test_status_t test_counts[test_status_count] = {0, 0, 0};
+static test_status_t test_status = test_status_pass;
+static const char * test_name = "";
+
+/* Reentrancy testing helpers. */
+
+#define NUM_REENTRANT_ALLOCS 20
+typedef enum {
+ non_reentrant = 0,
+ libc_reentrant = 1,
+ arena_new_reentrant = 2
+} reentrancy_t;
+static reentrancy_t reentrancy;
+
+static bool libc_hook_ran = false;
+static bool arena_new_hook_ran = false;
+
+/* Human-readable label for a reentrancy mode, used in per-test output. */
+static const char *
+reentrancy_t_str(reentrancy_t r) {
+ switch (r) {
+ case non_reentrant:
+ return "non-reentrant";
+ case libc_reentrant:
+ return "libc-reentrant";
+ case arena_new_reentrant:
+ return "arena_new-reentrant";
+ default:
+ unreachable();
+ }
+}
+
+/*
+ * Common body for the reentrancy hooks: record that the hook fired, then
+ * perform a burst of doubling-size allocations from inside the allocator.
+ */
+static void
+do_hook(bool *hook_ran, void (**hook)()) {
+ *hook_ran = true;
+ /* Disarm the hook first so the allocations below do not recurse. */
+ *hook = NULL;
+
+ size_t alloc_size = 1;
+ for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
+ free(malloc(alloc_size));
+ alloc_size *= 2;
+ }
+}
+
+/* Installed as test_hooks_libc_hook for libc-reentrant test runs. */
+static void
+libc_reentrancy_hook() {
+ do_hook(&libc_hook_ran, &test_hooks_libc_hook);
+}
+
+/* Installed as test_hooks_arena_new_hook for arena_new-reentrant runs. */
+static void
+arena_new_reentrancy_hook() {
+ do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
+}
+
+/* Actual test infrastructure. */
+/* True while the current test iteration is a reentrant (hooked) run. */
+bool
+test_is_reentrant() {
+ return reentrancy != non_reentrant;
+}
+
+/* Print a printf-style message and mark the current test as skipped. */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_skip(const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_skip;
+}
+
+/* Print a printf-style message and mark the current test as failed. */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+test_fail(const char *format, ...) {
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_fail;
+}
+
+/* Map a test_status_t to its display string. */
+static const char *
+test_status_string(test_status_t test_status) {
+ switch (test_status) {
+ case test_status_pass: return "pass";
+ case test_status_skip: return "skip";
+ case test_status_fail: return "fail";
+ default: not_reached();
+ }
+}
+
+/* Begin a test: bump the counter and reset per-test status (called by TEST_BEGIN). */
+void
+p_test_init(const char *name) {
+ test_count++;
+ test_status = test_status_pass;
+ test_name = name;
+}
+
+/* End a test: tally its result and print "name (mode): status" (called by TEST_END). */
+void
+p_test_fini(void) {
+ test_counts[test_status]++;
+ malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
+ test_status_string(test_status));
+}
+
+/*
+ * Fail the run if a test left the allocator's global slow-path count
+ * elevated (i.e. leaked a slowdown such as an uninstalled hook).
+ */
+static void
+check_global_slow(test_status_t *status) {
+#ifdef JEMALLOC_UNIT_TEST
+ /*
+ * This check needs to peek into tsd internals, which is why it's only
+ * exposed in unit tests.
+ */
+ if (tsd_global_slow()) {
+ malloc_printf("Testing increased global slow count\n");
+ *status = test_status_fail;
+ }
+#endif
+}
+
+/*
+ * Run each test in the NULL-terminated varargs list, once non-reentrant and
+ * (if do_reentrant) twice more with libc/arena_new reentrancy hooks armed.
+ * Returns the worst status observed; prints a cumulative summary at the end.
+ */
+static test_status_t
+p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
+ test_status_t ret;
+
+ if (do_malloc_init) {
+ /*
+ * Make sure initialization occurs prior to running tests.
+ * Tests are special because they may use internal facilities
+ * prior to triggering initialization as a side effect of
+ * calling into the public API.
+ */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return test_status_fail;
+ }
+ }
+
+ ret = test_status_pass;
+ for (; t != NULL; t = va_arg(ap, test_t *)) {
+ /* Non-reentrant run. */
+ reentrancy = non_reentrant;
+ test_hooks_arena_new_hook = test_hooks_libc_hook = NULL;
+ t();
+ /* Statuses are ordered pass < skip < fail; keep the worst. */
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+ /* Reentrant run. */
+ if (do_reentrant) {
+ reentrancy = libc_reentrant;
+ test_hooks_arena_new_hook = NULL;
+ test_hooks_libc_hook = &libc_reentrancy_hook;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+
+ reentrancy = arena_new_reentrant;
+ test_hooks_libc_hook = NULL;
+ test_hooks_arena_new_hook = &arena_new_reentrancy_hook;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
+ }
+ check_global_slow(&ret);
+ }
+ }
+
+ /* Counts are cumulative across all p_test*() calls in this process. */
+ malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
+ test_status_string(test_status_pass),
+ test_counts[test_status_pass], test_count,
+ test_status_string(test_status_skip),
+ test_counts[test_status_skip], test_count,
+ test_status_string(test_status_fail),
+ test_counts[test_status_fail], test_count);
+
+ return ret;
+}
+
+/* Standard entry point: malloc init + reentrancy variants (see TEST macros). */
+test_status_t
+p_test(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(true, true, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+/* Like p_test(), but runs each test only in non-reentrant mode. */
+test_status_t
+p_test_no_reentrancy(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ ret = p_test_impl(true, false, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+/* Like p_test(), but skips forcing allocator initialization (bootstrap tests). */
+test_status_t
+p_test_no_malloc_init(test_t *t, ...) {
+ test_status_t ret;
+ va_list ap;
+
+ ret = test_status_pass;
+ va_start(ap, t);
+ /*
+ * We also omit reentrancy from bootstrapping tests, since we don't
+ * (yet) care about general reentrancy during bootstrapping.
+ */
+ ret = p_test_impl(false, false, t, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+/* Record an assertion failure: print "prefix message" and mark the test failed. */
+void
+p_test_fail(const char *prefix, const char *message) {
+ malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
+ test_status = test_status_fail;
+}
diff --git a/deps/jemalloc/test/src/thd.c b/deps/jemalloc/test/src/thd.c
new file mode 100644
index 0000000..9a15eab
--- /dev/null
+++ b/deps/jemalloc/test/src/thd.c
@@ -0,0 +1,34 @@
+#include "test/jemalloc_test.h"
+
+#ifdef _WIN32
+/* Spawn a thread running proc(arg); reports failure via test_fail(). */
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
+ /* Calling-convention/return-type mismatch is papered over by this cast. */
+ LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
+ *thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
+ if (*thd == NULL) {
+ test_fail("Error in CreateThread()\n");
+ }
+}
+
+/*
+ * Join a thread and optionally retrieve its return value.
+ * NOTE(review): the void * return is round-tripped through a DWORD, so it
+ * is truncated to 32 bits on Win64 -- fine for the small status values the
+ * tests pass, but not for real pointers.
+ */
+void
+thd_join(thd_t thd, void **ret) {
+ if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
+ DWORD exit_code;
+ GetExitCodeThread(thd, (LPDWORD) &exit_code);
+ *ret = (void *)(uintptr_t)exit_code;
+ }
+}
+
+#else
+/* Spawn a thread running proc(arg); reports failure via test_fail(). */
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
+ if (pthread_create(thd, NULL, proc, arg) != 0) {
+ test_fail("Error in pthread_create()\n");
+ }
+}
+
+/* Join a thread; *ret receives its return value (errors ignored). */
+void
+thd_join(thd_t thd, void **ret) {
+ pthread_join(thd, ret);
+}
+#endif
diff --git a/deps/jemalloc/test/src/timer.c b/deps/jemalloc/test/src/timer.c
new file mode 100644
index 0000000..c451c63
--- /dev/null
+++ b/deps/jemalloc/test/src/timer.c
@@ -0,0 +1,56 @@
+#include "test/jemalloc_test.h"
+
+/* Record the start time of an interval measurement. */
+void
+timer_start(timedelta_t *timer) {
+ nstime_init(&timer->t0, 0);
+ nstime_update(&timer->t0);
+}
+
+/* Record the end time; must follow a timer_start() on the same timer. */
+void
+timer_stop(timedelta_t *timer) {
+ /* Copy t0 first so nstime_update() has a baseline to advance from. */
+ nstime_copy(&timer->t1, &timer->t0);
+ nstime_update(&timer->t1);
+}
+
+/* Elapsed microseconds between timer_start() and timer_stop() (truncated). */
+uint64_t
+timer_usec(const timedelta_t *timer) {
+ nstime_t delta;
+
+ nstime_copy(&delta, &timer->t1);
+ nstime_subtract(&delta, &timer->t0);
+ return nstime_ns(&delta) / 1000;
+}
+
+/*
+ * Format the ratio a/b as a decimal string ("W.FFF..."), filling buf up to
+ * buflen with as many fraction digits as fit; the last digit is rounded.
+ * NOTE(review): divides by timer_usec(b) -- if b measured under 1us this
+ * divides by zero; callers are expected to time non-trivial work.
+ */
+void
+timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) {
+ uint64_t t0 = timer_usec(a);
+ uint64_t t1 = timer_usec(b);
+ uint64_t mult;
+ size_t i = 0;
+ size_t j, n;
+
+ /* Whole. */
+ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
+ i += n;
+ if (i >= buflen) {
+ return;
+ }
+ /* mult = 10^(digits in the whole part), used to scale fraction digits. */
+ mult = 1;
+ for (j = 0; j < n; j++) {
+ mult *= 10;
+ }
+
+ /* Decimal. */
+ n = malloc_snprintf(&buf[i], buflen-i, ".");
+ i += n;
+
+ /* Fraction. */
+ while (i < buflen-1) {
+ /* Round only the final digit that fits in the buffer. */
+ uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
+ >= 5)) ? 1 : 0;
+ n = malloc_snprintf(&buf[i], buflen-i,
+ "%"FMTu64, (t0 * mult / t1) % 10 + round);
+ i += n;
+ mult *= 10;
+ }
+}
diff --git a/deps/jemalloc/test/stress/hookbench.c b/deps/jemalloc/test/stress/hookbench.c
new file mode 100644
index 0000000..97e90b0
--- /dev/null
+++ b/deps/jemalloc/test/stress/hookbench.c
@@ -0,0 +1,73 @@
+#include "test/jemalloc_test.h"
+
+/* No-op allocation hook: measures pure hook-dispatch overhead. */
+static void
+noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ uintptr_t result_raw, uintptr_t args_raw[3]) {
+}
+
+/* No-op deallocation hook. */
+static void
+noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]) {
+}
+
+/* No-op expand (resize-in-place) hook. */
+static void
+noop_expand_hook(void *extra, hook_expand_t type, void *address,
+ size_t old_usize, size_t new_usize, uintptr_t result_raw,
+ uintptr_t args_raw[4]) {
+}
+
+/* Timed workload: iters rounds of a minimal mallocx/free pair. */
+static void
+malloc_free_loop(int iters) {
+ for (int i = 0; i < iters; i++) {
+ void *p = mallocx(1, 0);
+ free(p);
+ }
+}
+
+/*
+ * Install 1..HOOK_MAX copies of the no-op hooks, timing the workload after
+ * each install to show how cost scales with hook count; then remove them all.
+ */
+static void
+test_hooked(int iters) {
+ hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook,
+ NULL};
+
+ int err;
+ void *handles[HOOK_MAX];
+ size_t sz = sizeof(handles[0]);
+
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.install", &handles[i],
+ &sz, &hooks, sizeof(hooks));
+ assert(err == 0);
+
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+ malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1,
+ i + 1 == 1 ? "" : "s", timer_usec(&timer));
+ }
+ /* Uninstall so subsequent measurements see an unhooked allocator. */
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.remove", NULL, NULL,
+ &handles[i], sizeof(handles[i]));
+ assert(err == 0);
+ }
+}
+
+/* Baseline: time the same workload with no hooks installed. */
+static void
+test_unhooked(int iters) {
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+
+ malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer));
+}
+
+int
+main(void) {
+ /* Initialize the allocator up front so timing excludes bootstrap cost. */
+ free(mallocx(1, 0));
+ int iters = 10 * 1000 * 1000;
+ malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
+ test_hooked(iters);
+ test_unhooked(iters);
+ /* Implicit return 0 (C99). */
+}
diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c
new file mode 100644
index 0000000..988b793
--- /dev/null
+++ b/deps/jemalloc/test/stress/microbench.c
@@ -0,0 +1,165 @@
+#include "test/jemalloc_test.h"
+
+/* Run func nwarmup times untimed (cache/JIT warmup), then time niter calls. */
+static inline void
+time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
+ void (*func)(void)) {
+ uint64_t i;
+
+ for (i = 0; i < nwarmup; i++) {
+ func();
+ }
+ timer_start(timer);
+ for (i = 0; i < niter; i++) {
+ func();
+ }
+ timer_stop(timer);
+}
+
+/*
+ * Time func_a and func_b over niter iterations each (after nwarmup warmup
+ * calls) and print both times plus their ratio.
+ * NOTE(review): func_a/func_b are declared without a prototype
+ * ("void (*func_a)"); this relies on compiler leniency when they are
+ * forwarded to time_func() -- confirm against upstream before tightening.
+ */
+void
+compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
+ void (*func_a), const char *name_b, void (*func_b)) {
+ timedelta_t timer_a, timer_b;
+ char ratio_buf[6];
+ void *p;
+
+ /* Keep at least one live allocation so the arena stays warm. */
+ p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+
+ time_func(&timer_a, nwarmup, niter, func_a);
+ time_func(&timer_b, nwarmup, niter, func_b);
+
+ timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
+ malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, "
+ "%s=%"FMTu64"us, ratio=1:%s\n",
+ niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
+ ratio_buf);
+
+ dallocx(p, 0);
+}
+
+/* Benchmark kernel: one malloc(1)/free() pair. */
+static void
+malloc_free(void) {
+ /* The compiler can optimize away free(malloc(1))! */
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ free(p);
+}
+
+/* Benchmark kernel: one mallocx(1, 0)/free() pair. */
+static void
+mallocx_free(void) {
+ void *p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+ free(p);
+}
+
+/* Compare the plain-malloc and mallocx allocation paths. */
+TEST_BEGIN(test_malloc_vs_mallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
+ malloc_free, "mallocx", mallocx_free);
+}
+TEST_END
+
+/* Benchmark kernel: malloc(1) paired with dallocx(). */
+static void
+malloc_dallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ dallocx(p, 0);
+}
+
+/* Benchmark kernel: malloc(1) paired with size-hinted sdallocx(). */
+static void
+malloc_sdallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ sdallocx(p, 1, 0);
+}
+
+/* Compare the free() and dallocx() deallocation paths. */
+TEST_BEGIN(test_free_vs_dallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
+ "dallocx", malloc_dallocx);
+}
+TEST_END
+
+/* Compare dallocx() against the size-hinted sdallocx() fast path. */
+TEST_BEGIN(test_dallocx_vs_sdallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
+ "sdallocx", malloc_sdallocx);
+}
+TEST_END
+
+/* Benchmark kernel: malloc + malloc_usable_size query + free. */
+static void
+malloc_mus_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ malloc_usable_size(p);
+ free(p);
+}
+
+/* Benchmark kernel: malloc + sallocx size query + free. */
+static void
+malloc_sallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ /* Checking the result also prevents the query from being optimized out. */
+ if (sallocx(p, 0) < 1) {
+ test_fail("Unexpected sallocx() failure");
+ }
+ free(p);
+}
+
+/* Compare malloc_usable_size() against sallocx() for size queries. */
+TEST_BEGIN(test_mus_vs_sallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
+ malloc_mus_free, "sallocx", malloc_sallocx_free);
+}
+TEST_END
+
+/* Benchmark kernel: malloc + nallocx size-class query + free. */
+static void
+malloc_nallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (nallocx(1, 0) < 1) {
+ test_fail("Unexpected nallocx() failure");
+ }
+ free(p);
+}
+
+/* Compare sallocx() (live allocation) against nallocx() (hypothetical). */
+TEST_BEGIN(test_sallocx_vs_nallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
+ malloc_sallocx_free, "nallocx", malloc_nallocx_free);
+}
+TEST_END
+
+int
+main(void) {
+ /* Benchmarks are timing-sensitive; skip the reentrant re-runs. */
+ return test_no_reentrancy(
+ test_malloc_vs_mallocx,
+ test_free_vs_dallocx,
+ test_dallocx_vs_sdallocx,
+ test_mus_vs_sallocx,
+ test_sallocx_vs_nallocx);
+}
diff --git a/deps/jemalloc/test/test.sh.in b/deps/jemalloc/test/test.sh.in
new file mode 100644
index 0000000..39302ff
--- /dev/null
+++ b/deps/jemalloc/test/test.sh.in
@@ -0,0 +1,80 @@
+#!/bin/sh
+# Test harness template: configure substitutes @...@ placeholders. Runs each
+# test binary named on the command line, sourcing an optional per-test .sh
+# to set MALLOC_CONF, and tallies pass/skip/fail from exit codes.
+
+# Make sure the freshly built library is found before any installed copy.
+case @abi@ in
+ macho)
+ export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
+ ;;
+ pecoff)
+ export PATH="${PATH}:@objroot@lib"
+ ;;
+ *)
+ ;;
+esac
+
+# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so
+# it can be repeatedly concatenated with per test settings.
+export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF}
+# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL.
+export_malloc_conf() {
+  if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then
+    export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}"
+  else
+    export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}"
+  fi
+}
+
+# Corresponds to test_status_t.
+pass_code=0
+skip_code=1
+fail_code=2
+
+pass_count=0
+skip_count=0
+fail_count=0
+# NOTE(review): $@ is intentionally unquoted here, so test names containing
+# whitespace would split; test paths in this tree never contain spaces.
+for t in $@; do
+  if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
+    echo
+  fi
+  echo "=== ${t} ==="
+  if [ -e "@srcroot@${t}.sh" ] ; then
+    # Source the shell script corresponding to the test in a subshell and
+    # execute the test.  This allows the shell script to set MALLOC_CONF, which
+    # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the
+    # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
+    enable_fill=@enable_fill@ \
+    enable_prof=@enable_prof@ \
+    . @srcroot@${t}.sh && \
+    export_malloc_conf && \
+    $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
+  else
+    export MALLOC_CONF= && \
+    export_malloc_conf && \
+    $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
+  fi
+  # Classify the test's exit code; anything unrecognized aborts the harness.
+  result_code=$?
+  case ${result_code} in
+    ${pass_code})
+      pass_count=$((pass_count+1))
+      ;;
+    ${skip_code})
+      skip_count=$((skip_count+1))
+      ;;
+    ${fail_code})
+      fail_count=$((fail_count+1))
+      ;;
+    *)
+      echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2
+      echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2
+      exit 1
+  esac
+done
+
+total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
+echo
+echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
+
+# Exit nonzero iff any test failed (skips do not fail the suite).
+if [ ${fail_count} -eq 0 ] ; then
+  exit 0
+else
+  exit 1
+fi
diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c
new file mode 100644
index 0000000..1fc8cf1
--- /dev/null
+++ b/deps/jemalloc/test/unit/SFMT.c
@@ -0,0 +1,1599 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "test/jemalloc_test.h"
+
+#define BLOCK_SIZE 10000
+#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
+#define COUNT_1 1000
+#define COUNT_2 700
+
+static const uint32_t init_gen_rand_32_expected[] = {
+ 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
+ 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
+ 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
+ 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
+ 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
+ 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
+ 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
+ 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
+ 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
+ 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
+ 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
+ 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
+ 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
+ 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
+ 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
+ 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
+ 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
+ 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
+ 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
+ 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
+ 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
+ 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
+ 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
+ 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
+ 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
+ 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
+ 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
+ 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
+ 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
+ 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
+ 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
+ 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
+ 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
+ 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
+ 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
+ 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
+ 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
+ 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
+ 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
+ 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
+ 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
+ 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
+ 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
+ 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
+ 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
+ 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
+ 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
+ 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
+ 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
+ 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
+ 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
+ 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
+ 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
+ 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
+ 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
+ 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
+ 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
+ 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
+ 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
+ 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
+ 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
+ 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
+ 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
+ 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
+ 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
+ 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
+ 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
+ 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
+ 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
+ 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
+ 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
+ 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
+ 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
+ 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
+ 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
+ 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
+ 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
+ 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
+ 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
+ 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
+ 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
+ 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
+ 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
+ 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
+ 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
+ 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
+ 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
+ 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
+ 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
+ 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
+ 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
+ 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
+ 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
+ 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
+ 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
+ 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
+ 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
+ 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
+ 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
+ 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
+ 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
+ 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
+ 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
+ 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
+ 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
+ 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
+ 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
+ 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
+ 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
+ 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
+ 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
+ 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
+ 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
+ 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
+ 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
+ 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
+ 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
+ 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
+ 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
+ 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
+ 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
+ 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
+ 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
+ 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
+ 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
+ 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
+ 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
+ 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
+ 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
+ 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
+ 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
+ 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
+ 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
+ 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
+ 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
+ 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
+ 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
+ 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
+ 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
+ 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
+ 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
+ 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
+ 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
+ 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
+ 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
+ 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
+ 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
+ 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
+ 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
+ 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
+ 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
+ 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
+ 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
+ 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
+ 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
+ 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
+ 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
+ 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
+ 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
+ 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
+ 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
+ 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
+ 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
+ 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
+ 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
+ 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
+ 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
+ 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
+ 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
+ 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
+ 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
+ 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
+ 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
+ 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
+ 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
+ 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
+ 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
+ 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
+ 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
+ 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
+ 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
+ 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
+ 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
+ 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
+ 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
+ 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
+ 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
+ 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
+ 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
+ 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
+ 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
+ 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
+ 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
+ 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
+ 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
+ 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
+ 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
+ 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
+ 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
+ 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
+};
+static const uint32_t init_by_array_32_expected[] = {
+ 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
+ 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
+ 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
+ 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
+ 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
+ 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
+ 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
+ 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
+ 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
+ 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
+ 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
+ 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
+ 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
+ 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
+ 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
+ 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
+ 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
+ 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
+ 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
+ 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
+ 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
+ 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
+ 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
+ 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
+ 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
+ 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
+ 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
+ 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
+ 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
+ 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
+ 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
+ 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
+ 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
+ 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
+ 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
+ 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
+ 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
+ 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
+ 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
+ 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
+ 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
+ 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
+ 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
+ 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
+ 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
+ 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
+ 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
+ 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
+ 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
+ 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
+ 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
+ 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
+ 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
+ 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
+ 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
+ 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
+ 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
+ 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
+ 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
+ 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
+ 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
+ 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
+ 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
+ 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
+ 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
+ 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
+ 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
+ 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
+ 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
+ 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
+ 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
+ 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
+ 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
+ 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
+ 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
+ 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
+ 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
+ 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
+ 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
+ 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
+ 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
+ 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
+ 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
+ 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
+ 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
+ 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
+ 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
+ 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
+ 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
+ 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
+ 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
+ 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
+ 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
+ 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
+ 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
+ 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
+ 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
+ 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
+ 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
+ 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
+ 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
+ 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
+ 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
+ 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
+ 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
+ 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
+ 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
+ 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
+ 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
+ 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
+ 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
+ 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
+ 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
+ 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
+ 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
+ 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
+ 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
+ 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
+ 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
+ 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
+ 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
+ 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
+ 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
+ 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
+ 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
+ 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
+ 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
+ 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
+ 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
+ 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
+ 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
+ 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
+ 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
+ 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
+ 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
+ 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
+ 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
+ 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
+ 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
+ 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
+ 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
+ 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
+ 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
+ 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
+ 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
+ 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
+ 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
+ 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
+ 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
+ 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
+ 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
+ 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
+ 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
+ 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
+ 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
+ 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
+ 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
+ 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
+ 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
+ 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
+ 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
+ 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
+ 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
+ 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
+ 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
+ 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
+ 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
+ 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
+ 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
+ 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
+ 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
+ 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
+ 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
+ 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
+ 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
+ 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
+ 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
+ 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
+ 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
+ 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
+ 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
+ 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
+ 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
+ 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
+ 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
+ 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
+ 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
+ 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
+ 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
+ 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
+ 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
+ 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
+ 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
+ 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
+ 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
+ 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
+ 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
+ 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
+ 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
+ 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
+};
+static const uint64_t init_gen_rand_64_expected[] = {
+ KQU(16924766246869039260), KQU( 8201438687333352714),
+ KQU( 2265290287015001750), KQU(18397264611805473832),
+ KQU( 3375255223302384358), KQU( 6345559975416828796),
+ KQU(18229739242790328073), KQU( 7596792742098800905),
+ KQU( 255338647169685981), KQU( 2052747240048610300),
+ KQU(18328151576097299343), KQU(12472905421133796567),
+ KQU(11315245349717600863), KQU(16594110197775871209),
+ KQU(15708751964632456450), KQU(10452031272054632535),
+ KQU(11097646720811454386), KQU( 4556090668445745441),
+ KQU(17116187693090663106), KQU(14931526836144510645),
+ KQU( 9190752218020552591), KQU( 9625800285771901401),
+ KQU(13995141077659972832), KQU( 5194209094927829625),
+ KQU( 4156788379151063303), KQU( 8523452593770139494),
+ KQU(14082382103049296727), KQU( 2462601863986088483),
+ KQU( 3030583461592840678), KQU( 5221622077872827681),
+ KQU( 3084210671228981236), KQU(13956758381389953823),
+ KQU(13503889856213423831), KQU(15696904024189836170),
+ KQU( 4612584152877036206), KQU( 6231135538447867881),
+ KQU(10172457294158869468), KQU( 6452258628466708150),
+ KQU(14044432824917330221), KQU( 370168364480044279),
+ KQU(10102144686427193359), KQU( 667870489994776076),
+ KQU( 2732271956925885858), KQU(18027788905977284151),
+ KQU(15009842788582923859), KQU( 7136357960180199542),
+ KQU(15901736243475578127), KQU(16951293785352615701),
+ KQU(10551492125243691632), KQU(17668869969146434804),
+ KQU(13646002971174390445), KQU( 9804471050759613248),
+ KQU( 5511670439655935493), KQU(18103342091070400926),
+ KQU(17224512747665137533), KQU(15534627482992618168),
+ KQU( 1423813266186582647), KQU(15821176807932930024),
+ KQU( 30323369733607156), KQU(11599382494723479403),
+ KQU( 653856076586810062), KQU( 3176437395144899659),
+ KQU(14028076268147963917), KQU(16156398271809666195),
+ KQU( 3166955484848201676), KQU( 5746805620136919390),
+ KQU(17297845208891256593), KQU(11691653183226428483),
+ KQU(17900026146506981577), KQU(15387382115755971042),
+ KQU(16923567681040845943), KQU( 8039057517199388606),
+ KQU(11748409241468629263), KQU( 794358245539076095),
+ KQU(13438501964693401242), KQU(14036803236515618962),
+ KQU( 5252311215205424721), KQU(17806589612915509081),
+ KQU( 6802767092397596006), KQU(14212120431184557140),
+ KQU( 1072951366761385712), KQU(13098491780722836296),
+ KQU( 9466676828710797353), KQU(12673056849042830081),
+ KQU(12763726623645357580), KQU(16468961652999309493),
+ KQU(15305979875636438926), KQU(17444713151223449734),
+ KQU( 5692214267627883674), KQU(13049589139196151505),
+ KQU( 880115207831670745), KQU( 1776529075789695498),
+ KQU(16695225897801466485), KQU(10666901778795346845),
+ KQU( 6164389346722833869), KQU( 2863817793264300475),
+ KQU( 9464049921886304754), KQU( 3993566636740015468),
+ KQU( 9983749692528514136), KQU(16375286075057755211),
+ KQU(16042643417005440820), KQU(11445419662923489877),
+ KQU( 7999038846885158836), KQU( 6721913661721511535),
+ KQU( 5363052654139357320), KQU( 1817788761173584205),
+ KQU(13290974386445856444), KQU( 4650350818937984680),
+ KQU( 8219183528102484836), KQU( 1569862923500819899),
+ KQU( 4189359732136641860), KQU(14202822961683148583),
+ KQU( 4457498315309429058), KQU(13089067387019074834),
+ KQU(11075517153328927293), KQU(10277016248336668389),
+ KQU( 7070509725324401122), KQU(17808892017780289380),
+ KQU(13143367339909287349), KQU( 1377743745360085151),
+ KQU( 5749341807421286485), KQU(14832814616770931325),
+ KQU( 7688820635324359492), KQU(10960474011539770045),
+ KQU( 81970066653179790), KQU(12619476072607878022),
+ KQU( 4419566616271201744), KQU(15147917311750568503),
+ KQU( 5549739182852706345), KQU( 7308198397975204770),
+ KQU(13580425496671289278), KQU(17070764785210130301),
+ KQU( 8202832846285604405), KQU( 6873046287640887249),
+ KQU( 6927424434308206114), KQU( 6139014645937224874),
+ KQU(10290373645978487639), KQU(15904261291701523804),
+ KQU( 9628743442057826883), KQU(18383429096255546714),
+ KQU( 4977413265753686967), KQU( 7714317492425012869),
+ KQU( 9025232586309926193), KQU(14627338359776709107),
+ KQU(14759849896467790763), KQU(10931129435864423252),
+ KQU( 4588456988775014359), KQU(10699388531797056724),
+ KQU( 468652268869238792), KQU( 5755943035328078086),
+ KQU( 2102437379988580216), KQU( 9986312786506674028),
+ KQU( 2654207180040945604), KQU( 8726634790559960062),
+ KQU( 100497234871808137), KQU( 2800137176951425819),
+ KQU( 6076627612918553487), KQU( 5780186919186152796),
+ KQU( 8179183595769929098), KQU( 6009426283716221169),
+ KQU( 2796662551397449358), KQU( 1756961367041986764),
+ KQU( 6972897917355606205), KQU(14524774345368968243),
+ KQU( 2773529684745706940), KQU( 4853632376213075959),
+ KQU( 4198177923731358102), KQU( 8271224913084139776),
+ KQU( 2741753121611092226), KQU(16782366145996731181),
+ KQU(15426125238972640790), KQU(13595497100671260342),
+ KQU( 3173531022836259898), KQU( 6573264560319511662),
+ KQU(18041111951511157441), KQU( 2351433581833135952),
+ KQU( 3113255578908173487), KQU( 1739371330877858784),
+ KQU(16046126562789165480), KQU( 8072101652214192925),
+ KQU(15267091584090664910), KQU( 9309579200403648940),
+ KQU( 5218892439752408722), KQU(14492477246004337115),
+ KQU(17431037586679770619), KQU( 7385248135963250480),
+ KQU( 9580144956565560660), KQU( 4919546228040008720),
+ KQU(15261542469145035584), KQU(18233297270822253102),
+ KQU( 5453248417992302857), KQU( 9309519155931460285),
+ KQU(10342813012345291756), KQU(15676085186784762381),
+ KQU(15912092950691300645), KQU( 9371053121499003195),
+ KQU( 9897186478226866746), KQU(14061858287188196327),
+ KQU( 122575971620788119), KQU(12146750969116317754),
+ KQU( 4438317272813245201), KQU( 8332576791009527119),
+ KQU(13907785691786542057), KQU(10374194887283287467),
+ KQU( 2098798755649059566), KQU( 3416235197748288894),
+ KQU( 8688269957320773484), KQU( 7503964602397371571),
+ KQU(16724977015147478236), KQU( 9461512855439858184),
+ KQU(13259049744534534727), KQU( 3583094952542899294),
+ KQU( 8764245731305528292), KQU(13240823595462088985),
+ KQU(13716141617617910448), KQU(18114969519935960955),
+ KQU( 2297553615798302206), KQU( 4585521442944663362),
+ KQU(17776858680630198686), KQU( 4685873229192163363),
+ KQU( 152558080671135627), KQU(15424900540842670088),
+ KQU(13229630297130024108), KQU(17530268788245718717),
+ KQU(16675633913065714144), KQU( 3158912717897568068),
+ KQU(15399132185380087288), KQU( 7401418744515677872),
+ KQU(13135412922344398535), KQU( 6385314346100509511),
+ KQU(13962867001134161139), KQU(10272780155442671999),
+ KQU(12894856086597769142), KQU(13340877795287554994),
+ KQU(12913630602094607396), KQU(12543167911119793857),
+ KQU(17343570372251873096), KQU(10959487764494150545),
+ KQU( 6966737953093821128), KQU(13780699135496988601),
+ KQU( 4405070719380142046), KQU(14923788365607284982),
+ KQU( 2869487678905148380), KQU( 6416272754197188403),
+ KQU(15017380475943612591), KQU( 1995636220918429487),
+ KQU( 3402016804620122716), KQU(15800188663407057080),
+ KQU(11362369990390932882), KQU(15262183501637986147),
+ KQU(10239175385387371494), KQU( 9352042420365748334),
+ KQU( 1682457034285119875), KQU( 1724710651376289644),
+ KQU( 2038157098893817966), KQU( 9897825558324608773),
+ KQU( 1477666236519164736), KQU(16835397314511233640),
+ KQU(10370866327005346508), KQU(10157504370660621982),
+ KQU(12113904045335882069), KQU(13326444439742783008),
+ KQU(11302769043000765804), KQU(13594979923955228484),
+ KQU(11779351762613475968), KQU( 3786101619539298383),
+ KQU( 8021122969180846063), KQU(15745904401162500495),
+ KQU(10762168465993897267), KQU(13552058957896319026),
+ KQU(11200228655252462013), KQU( 5035370357337441226),
+ KQU( 7593918984545500013), KQU( 5418554918361528700),
+ KQU( 4858270799405446371), KQU( 9974659566876282544),
+ KQU(18227595922273957859), KQU( 2772778443635656220),
+ KQU(14285143053182085385), KQU( 9939700992429600469),
+ KQU(12756185904545598068), KQU( 2020783375367345262),
+ KQU( 57026775058331227), KQU( 950827867930065454),
+ KQU( 6602279670145371217), KQU( 2291171535443566929),
+ KQU( 5832380724425010313), KQU( 1220343904715982285),
+ KQU(17045542598598037633), KQU(15460481779702820971),
+ KQU(13948388779949365130), KQU(13975040175430829518),
+ KQU(17477538238425541763), KQU(11104663041851745725),
+ KQU(15860992957141157587), KQU(14529434633012950138),
+ KQU( 2504838019075394203), KQU( 7512113882611121886),
+ KQU( 4859973559980886617), KQU( 1258601555703250219),
+ KQU(15594548157514316394), KQU( 4516730171963773048),
+ KQU(11380103193905031983), KQU( 6809282239982353344),
+ KQU(18045256930420065002), KQU( 2453702683108791859),
+ KQU( 977214582986981460), KQU( 2006410402232713466),
+ KQU( 6192236267216378358), KQU( 3429468402195675253),
+ KQU(18146933153017348921), KQU(17369978576367231139),
+ KQU( 1246940717230386603), KQU(11335758870083327110),
+ KQU(14166488801730353682), KQU( 9008573127269635732),
+ KQU(10776025389820643815), KQU(15087605441903942962),
+ KQU( 1359542462712147922), KQU(13898874411226454206),
+ KQU(17911176066536804411), KQU( 9435590428600085274),
+ KQU( 294488509967864007), KQU( 8890111397567922046),
+ KQU( 7987823476034328778), KQU(13263827582440967651),
+ KQU( 7503774813106751573), KQU(14974747296185646837),
+ KQU( 8504765037032103375), KQU(17340303357444536213),
+ KQU( 7704610912964485743), KQU( 8107533670327205061),
+ KQU( 9062969835083315985), KQU(16968963142126734184),
+ KQU(12958041214190810180), KQU( 2720170147759570200),
+ KQU( 2986358963942189566), KQU(14884226322219356580),
+ KQU( 286224325144368520), KQU(11313800433154279797),
+ KQU(18366849528439673248), KQU(17899725929482368789),
+ KQU( 3730004284609106799), KQU( 1654474302052767205),
+ KQU( 5006698007047077032), KQU( 8196893913601182838),
+ KQU(15214541774425211640), KQU(17391346045606626073),
+ KQU( 8369003584076969089), KQU( 3939046733368550293),
+ KQU(10178639720308707785), KQU( 2180248669304388697),
+ KQU( 62894391300126322), KQU( 9205708961736223191),
+ KQU( 6837431058165360438), KQU( 3150743890848308214),
+ KQU(17849330658111464583), KQU(12214815643135450865),
+ KQU(13410713840519603402), KQU( 3200778126692046802),
+ KQU(13354780043041779313), KQU( 800850022756886036),
+ KQU(15660052933953067433), KQU( 6572823544154375676),
+ KQU(11030281857015819266), KQU(12682241941471433835),
+ KQU(11654136407300274693), KQU( 4517795492388641109),
+ KQU( 9757017371504524244), KQU(17833043400781889277),
+ KQU(12685085201747792227), KQU(10408057728835019573),
+ KQU( 98370418513455221), KQU( 6732663555696848598),
+ KQU(13248530959948529780), KQU( 3530441401230622826),
+ KQU(18188251992895660615), KQU( 1847918354186383756),
+ KQU( 1127392190402660921), KQU(11293734643143819463),
+ KQU( 3015506344578682982), KQU(13852645444071153329),
+ KQU( 2121359659091349142), KQU( 1294604376116677694),
+ KQU( 5616576231286352318), KQU( 7112502442954235625),
+ KQU(11676228199551561689), KQU(12925182803007305359),
+ KQU( 7852375518160493082), KQU( 1136513130539296154),
+ KQU( 5636923900916593195), KQU( 3221077517612607747),
+ KQU(17784790465798152513), KQU( 3554210049056995938),
+ KQU(17476839685878225874), KQU( 3206836372585575732),
+ KQU( 2765333945644823430), KQU(10080070903718799528),
+ KQU( 5412370818878286353), KQU( 9689685887726257728),
+ KQU( 8236117509123533998), KQU( 1951139137165040214),
+ KQU( 4492205209227980349), KQU(16541291230861602967),
+ KQU( 1424371548301437940), KQU( 9117562079669206794),
+ KQU(14374681563251691625), KQU(13873164030199921303),
+ KQU( 6680317946770936731), KQU(15586334026918276214),
+ KQU(10896213950976109802), KQU( 9506261949596413689),
+ KQU( 9903949574308040616), KQU( 6038397344557204470),
+ KQU( 174601465422373648), KQU(15946141191338238030),
+ KQU(17142225620992044937), KQU( 7552030283784477064),
+ KQU( 2947372384532947997), KQU( 510797021688197711),
+ KQU( 4962499439249363461), KQU( 23770320158385357),
+ KQU( 959774499105138124), KQU( 1468396011518788276),
+ KQU( 2015698006852312308), KQU( 4149400718489980136),
+ KQU( 5992916099522371188), KQU(10819182935265531076),
+ KQU(16189787999192351131), KQU( 342833961790261950),
+ KQU(12470830319550495336), KQU(18128495041912812501),
+ KQU( 1193600899723524337), KQU( 9056793666590079770),
+ KQU( 2154021227041669041), KQU( 4963570213951235735),
+ KQU( 4865075960209211409), KQU( 2097724599039942963),
+ KQU( 2024080278583179845), KQU(11527054549196576736),
+ KQU(10650256084182390252), KQU( 4808408648695766755),
+ KQU( 1642839215013788844), KQU(10607187948250398390),
+ KQU( 7076868166085913508), KQU( 730522571106887032),
+ KQU(12500579240208524895), KQU( 4484390097311355324),
+ KQU(15145801330700623870), KQU( 8055827661392944028),
+ KQU( 5865092976832712268), KQU(15159212508053625143),
+ KQU( 3560964582876483341), KQU( 4070052741344438280),
+ KQU( 6032585709886855634), KQU(15643262320904604873),
+ KQU( 2565119772293371111), KQU( 318314293065348260),
+ KQU(15047458749141511872), KQU( 7772788389811528730),
+ KQU( 7081187494343801976), KQU( 6465136009467253947),
+ KQU(10425940692543362069), KQU( 554608190318339115),
+ KQU(14796699860302125214), KQU( 1638153134431111443),
+ KQU(10336967447052276248), KQU( 8412308070396592958),
+ KQU( 4004557277152051226), KQU( 8143598997278774834),
+ KQU(16413323996508783221), KQU(13139418758033994949),
+ KQU( 9772709138335006667), KQU( 2818167159287157659),
+ KQU(17091740573832523669), KQU(14629199013130751608),
+ KQU(18268322711500338185), KQU( 8290963415675493063),
+ KQU( 8830864907452542588), KQU( 1614839084637494849),
+ KQU(14855358500870422231), KQU( 3472996748392519937),
+ KQU(15317151166268877716), KQU( 5825895018698400362),
+ KQU(16730208429367544129), KQU(10481156578141202800),
+ KQU( 4746166512382823750), KQU(12720876014472464998),
+ KQU( 8825177124486735972), KQU(13733447296837467838),
+ KQU( 6412293741681359625), KQU( 8313213138756135033),
+ KQU(11421481194803712517), KQU( 7997007691544174032),
+ KQU( 6812963847917605930), KQU( 9683091901227558641),
+ KQU(14703594165860324713), KQU( 1775476144519618309),
+ KQU( 2724283288516469519), KQU( 717642555185856868),
+ KQU( 8736402192215092346), KQU(11878800336431381021),
+ KQU( 4348816066017061293), KQU( 6115112756583631307),
+ KQU( 9176597239667142976), KQU(12615622714894259204),
+ KQU(10283406711301385987), KQU( 5111762509485379420),
+ KQU( 3118290051198688449), KQU( 7345123071632232145),
+ KQU( 9176423451688682359), KQU( 4843865456157868971),
+ KQU(12008036363752566088), KQU(12058837181919397720),
+ KQU( 2145073958457347366), KQU( 1526504881672818067),
+ KQU( 3488830105567134848), KQU(13208362960674805143),
+ KQU( 4077549672899572192), KQU( 7770995684693818365),
+ KQU( 1398532341546313593), KQU(12711859908703927840),
+ KQU( 1417561172594446813), KQU(17045191024194170604),
+ KQU( 4101933177604931713), KQU(14708428834203480320),
+ KQU(17447509264469407724), KQU(14314821973983434255),
+ KQU(17990472271061617265), KQU( 5087756685841673942),
+ KQU(12797820586893859939), KQU( 1778128952671092879),
+ KQU( 3535918530508665898), KQU( 9035729701042481301),
+ KQU(14808661568277079962), KQU(14587345077537747914),
+ KQU(11920080002323122708), KQU( 6426515805197278753),
+ KQU( 3295612216725984831), KQU(11040722532100876120),
+ KQU(12305952936387598754), KQU(16097391899742004253),
+ KQU( 4908537335606182208), KQU(12446674552196795504),
+ KQU(16010497855816895177), KQU( 9194378874788615551),
+ KQU( 3382957529567613384), KQU( 5154647600754974077),
+ KQU( 9801822865328396141), KQU( 9023662173919288143),
+ KQU(17623115353825147868), KQU( 8238115767443015816),
+ KQU(15811444159859002560), KQU( 9085612528904059661),
+ KQU( 6888601089398614254), KQU( 258252992894160189),
+ KQU( 6704363880792428622), KQU( 6114966032147235763),
+ KQU(11075393882690261875), KQU( 8797664238933620407),
+ KQU( 5901892006476726920), KQU( 5309780159285518958),
+ KQU(14940808387240817367), KQU(14642032021449656698),
+ KQU( 9808256672068504139), KQU( 3670135111380607658),
+ KQU(11211211097845960152), KQU( 1474304506716695808),
+ KQU(15843166204506876239), KQU( 7661051252471780561),
+ KQU(10170905502249418476), KQU( 7801416045582028589),
+ KQU( 2763981484737053050), KQU( 9491377905499253054),
+ KQU(16201395896336915095), KQU( 9256513756442782198),
+ KQU( 5411283157972456034), KQU( 5059433122288321676),
+ KQU( 4327408006721123357), KQU( 9278544078834433377),
+ KQU( 7601527110882281612), KQU(11848295896975505251),
+ KQU(12096998801094735560), KQU(14773480339823506413),
+ KQU(15586227433895802149), KQU(12786541257830242872),
+ KQU( 6904692985140503067), KQU( 5309011515263103959),
+ KQU(12105257191179371066), KQU(14654380212442225037),
+ KQU( 2556774974190695009), KQU( 4461297399927600261),
+ KQU(14888225660915118646), KQU(14915459341148291824),
+ KQU( 2738802166252327631), KQU( 6047155789239131512),
+ KQU(12920545353217010338), KQU(10697617257007840205),
+ KQU( 2751585253158203504), KQU(13252729159780047496),
+ KQU(14700326134672815469), KQU(14082527904374600529),
+ KQU(16852962273496542070), KQU(17446675504235853907),
+ KQU(15019600398527572311), KQU(12312781346344081551),
+ KQU(14524667935039810450), KQU( 5634005663377195738),
+ KQU(11375574739525000569), KQU( 2423665396433260040),
+ KQU( 5222836914796015410), KQU( 4397666386492647387),
+ KQU( 4619294441691707638), KQU( 665088602354770716),
+ KQU(13246495665281593610), KQU( 6564144270549729409),
+ KQU(10223216188145661688), KQU( 3961556907299230585),
+ KQU(11543262515492439914), KQU(16118031437285993790),
+ KQU( 7143417964520166465), KQU(13295053515909486772),
+ KQU( 40434666004899675), KQU(17127804194038347164),
+ KQU( 8599165966560586269), KQU( 8214016749011284903),
+ KQU(13725130352140465239), KQU( 5467254474431726291),
+ KQU( 7748584297438219877), KQU(16933551114829772472),
+ KQU( 2169618439506799400), KQU( 2169787627665113463),
+ KQU(17314493571267943764), KQU(18053575102911354912),
+ KQU(11928303275378476973), KQU(11593850925061715550),
+ KQU(17782269923473589362), KQU( 3280235307704747039),
+ KQU( 6145343578598685149), KQU(17080117031114086090),
+ KQU(18066839902983594755), KQU( 6517508430331020706),
+ KQU( 8092908893950411541), KQU(12558378233386153732),
+ KQU( 4476532167973132976), KQU(16081642430367025016),
+ KQU( 4233154094369139361), KQU( 8693630486693161027),
+ KQU(11244959343027742285), KQU(12273503967768513508),
+ KQU(14108978636385284876), KQU( 7242414665378826984),
+ KQU( 6561316938846562432), KQU( 8601038474994665795),
+ KQU(17532942353612365904), KQU(17940076637020912186),
+ KQU( 7340260368823171304), KQU( 7061807613916067905),
+ KQU(10561734935039519326), KQU(17990796503724650862),
+ KQU( 6208732943911827159), KQU( 359077562804090617),
+ KQU(14177751537784403113), KQU(10659599444915362902),
+ KQU(15081727220615085833), KQU(13417573895659757486),
+ KQU(15513842342017811524), KQU(11814141516204288231),
+ KQU( 1827312513875101814), KQU( 2804611699894603103),
+ KQU(17116500469975602763), KQU(12270191815211952087),
+ KQU(12256358467786024988), KQU(18435021722453971267),
+ KQU( 671330264390865618), KQU( 476504300460286050),
+ KQU(16465470901027093441), KQU( 4047724406247136402),
+ KQU( 1322305451411883346), KQU( 1388308688834322280),
+ KQU( 7303989085269758176), KQU( 9323792664765233642),
+ KQU( 4542762575316368936), KQU(17342696132794337618),
+ KQU( 4588025054768498379), KQU(13415475057390330804),
+ KQU(17880279491733405570), KQU(10610553400618620353),
+ KQU( 3180842072658960139), KQU(13002966655454270120),
+ KQU( 1665301181064982826), KQU( 7083673946791258979),
+ KQU( 190522247122496820), KQU(17388280237250677740),
+ KQU( 8430770379923642945), KQU(12987180971921668584),
+ KQU( 2311086108365390642), KQU( 2870984383579822345),
+ KQU(14014682609164653318), KQU(14467187293062251484),
+ KQU( 192186361147413298), KQU(15171951713531796524),
+ KQU( 9900305495015948728), KQU(17958004775615466344),
+ KQU(14346380954498606514), KQU(18040047357617407096),
+ KQU( 5035237584833424532), KQU(15089555460613972287),
+ KQU( 4131411873749729831), KQU( 1329013581168250330),
+ KQU(10095353333051193949), KQU(10749518561022462716),
+ KQU( 9050611429810755847), KQU(15022028840236655649),
+ KQU( 8775554279239748298), KQU(13105754025489230502),
+ KQU(15471300118574167585), KQU( 89864764002355628),
+ KQU( 8776416323420466637), KQU( 5280258630612040891),
+ KQU( 2719174488591862912), KQU( 7599309137399661994),
+ KQU(15012887256778039979), KQU(14062981725630928925),
+ KQU(12038536286991689603), KQU( 7089756544681775245),
+ KQU(10376661532744718039), KQU( 1265198725901533130),
+ KQU(13807996727081142408), KQU( 2935019626765036403),
+ KQU( 7651672460680700141), KQU( 3644093016200370795),
+ KQU( 2840982578090080674), KQU(17956262740157449201),
+ KQU(18267979450492880548), KQU(11799503659796848070),
+ KQU( 9942537025669672388), KQU(11886606816406990297),
+ KQU( 5488594946437447576), KQU( 7226714353282744302),
+ KQU( 3784851653123877043), KQU( 878018453244803041),
+ KQU(12110022586268616085), KQU( 734072179404675123),
+ KQU(11869573627998248542), KQU( 469150421297783998),
+ KQU( 260151124912803804), KQU(11639179410120968649),
+ KQU( 9318165193840846253), KQU(12795671722734758075),
+ KQU(15318410297267253933), KQU( 691524703570062620),
+ KQU( 5837129010576994601), KQU(15045963859726941052),
+ KQU( 5850056944932238169), KQU(12017434144750943807),
+ KQU( 7447139064928956574), KQU( 3101711812658245019),
+ KQU(16052940704474982954), KQU(18195745945986994042),
+ KQU( 8932252132785575659), KQU(13390817488106794834),
+ KQU(11582771836502517453), KQU( 4964411326683611686),
+ KQU( 2195093981702694011), KQU(14145229538389675669),
+ KQU(16459605532062271798), KQU( 866316924816482864),
+ KQU( 4593041209937286377), KQU( 8415491391910972138),
+ KQU( 4171236715600528969), KQU(16637569303336782889),
+ KQU( 2002011073439212680), KQU(17695124661097601411),
+ KQU( 4627687053598611702), KQU( 7895831936020190403),
+ KQU( 8455951300917267802), KQU( 2923861649108534854),
+ KQU( 8344557563927786255), KQU( 6408671940373352556),
+ KQU(12210227354536675772), KQU(14294804157294222295),
+ KQU(10103022425071085127), KQU(10092959489504123771),
+ KQU( 6554774405376736268), KQU(12629917718410641774),
+ KQU( 6260933257596067126), KQU( 2460827021439369673),
+ KQU( 2541962996717103668), KQU( 597377203127351475),
+ KQU( 5316984203117315309), KQU( 4811211393563241961),
+ KQU(13119698597255811641), KQU( 8048691512862388981),
+ KQU(10216818971194073842), KQU( 4612229970165291764),
+ KQU(10000980798419974770), KQU( 6877640812402540687),
+ KQU( 1488727563290436992), KQU( 2227774069895697318),
+ KQU(11237754507523316593), KQU(13478948605382290972),
+ KQU( 1963583846976858124), KQU( 5512309205269276457),
+ KQU( 3972770164717652347), KQU( 3841751276198975037),
+ KQU(10283343042181903117), KQU( 8564001259792872199),
+ KQU(16472187244722489221), KQU( 8953493499268945921),
+ KQU( 3518747340357279580), KQU( 4003157546223963073),
+ KQU( 3270305958289814590), KQU( 3966704458129482496),
+ KQU( 8122141865926661939), KQU(14627734748099506653),
+ KQU(13064426990862560568), KQU( 2414079187889870829),
+ KQU( 5378461209354225306), KQU(10841985740128255566),
+ KQU( 538582442885401738), KQU( 7535089183482905946),
+ KQU(16117559957598879095), KQU( 8477890721414539741),
+ KQU( 1459127491209533386), KQU(17035126360733620462),
+ KQU( 8517668552872379126), KQU(10292151468337355014),
+ KQU(17081267732745344157), KQU(13751455337946087178),
+ KQU(14026945459523832966), KQU( 6653278775061723516),
+ KQU(10619085543856390441), KQU( 2196343631481122885),
+ KQU(10045966074702826136), KQU(10082317330452718282),
+ KQU( 5920859259504831242), KQU( 9951879073426540617),
+ KQU( 7074696649151414158), KQU(15808193543879464318),
+ KQU( 7385247772746953374), KQU( 3192003544283864292),
+ KQU(18153684490917593847), KQU(12423498260668568905),
+ KQU(10957758099756378169), KQU(11488762179911016040),
+ KQU( 2099931186465333782), KQU(11180979581250294432),
+ KQU( 8098916250668367933), KQU( 3529200436790763465),
+ KQU(12988418908674681745), KQU( 6147567275954808580),
+ KQU( 3207503344604030989), KQU(10761592604898615360),
+ KQU( 229854861031893504), KQU( 8809853962667144291),
+ KQU(13957364469005693860), KQU( 7634287665224495886),
+ KQU(12353487366976556874), KQU( 1134423796317152034),
+ KQU( 2088992471334107068), KQU( 7393372127190799698),
+ KQU( 1845367839871058391), KQU( 207922563987322884),
+ KQU(11960870813159944976), KQU(12182120053317317363),
+ KQU(17307358132571709283), KQU(13871081155552824936),
+ KQU(18304446751741566262), KQU( 7178705220184302849),
+ KQU(10929605677758824425), KQU(16446976977835806844),
+ KQU(13723874412159769044), KQU( 6942854352100915216),
+ KQU( 1726308474365729390), KQU( 2150078766445323155),
+ KQU(15345558947919656626), KQU(12145453828874527201),
+ KQU( 2054448620739726849), KQU( 2740102003352628137),
+ KQU(11294462163577610655), KQU( 756164283387413743),
+ KQU(17841144758438810880), KQU(10802406021185415861),
+ KQU( 8716455530476737846), KQU( 6321788834517649606),
+ KQU(14681322910577468426), KQU(17330043563884336387),
+ KQU(12701802180050071614), KQU(14695105111079727151),
+ KQU( 5112098511654172830), KQU( 4957505496794139973),
+ KQU( 8270979451952045982), KQU(12307685939199120969),
+ KQU(12425799408953443032), KQU( 8376410143634796588),
+ KQU(16621778679680060464), KQU( 3580497854566660073),
+ KQU( 1122515747803382416), KQU( 857664980960597599),
+ KQU( 6343640119895925918), KQU(12878473260854462891),
+ KQU(10036813920765722626), KQU(14451335468363173812),
+ KQU( 5476809692401102807), KQU(16442255173514366342),
+ KQU(13060203194757167104), KQU(14354124071243177715),
+ KQU(15961249405696125227), KQU(13703893649690872584),
+ KQU( 363907326340340064), KQU( 6247455540491754842),
+ KQU(12242249332757832361), KQU( 156065475679796717),
+ KQU( 9351116235749732355), KQU( 4590350628677701405),
+ KQU( 1671195940982350389), KQU(13501398458898451905),
+ KQU( 6526341991225002255), KQU( 1689782913778157592),
+ KQU( 7439222350869010334), KQU(13975150263226478308),
+ KQU(11411961169932682710), KQU(17204271834833847277),
+ KQU( 541534742544435367), KQU( 6591191931218949684),
+ KQU( 2645454775478232486), KQU( 4322857481256485321),
+ KQU( 8477416487553065110), KQU(12902505428548435048),
+ KQU( 971445777981341415), KQU(14995104682744976712),
+ KQU( 4243341648807158063), KQU( 8695061252721927661),
+ KQU( 5028202003270177222), KQU( 2289257340915567840),
+ KQU(13870416345121866007), KQU(13994481698072092233),
+ KQU( 6912785400753196481), KQU( 2278309315841980139),
+ KQU( 4329765449648304839), KQU( 5963108095785485298),
+ KQU( 4880024847478722478), KQU(16015608779890240947),
+ KQU( 1866679034261393544), KQU( 914821179919731519),
+ KQU( 9643404035648760131), KQU( 2418114953615593915),
+ KQU( 944756836073702374), KQU(15186388048737296834),
+ KQU( 7723355336128442206), KQU( 7500747479679599691),
+ KQU(18013961306453293634), KQU( 2315274808095756456),
+ KQU(13655308255424029566), KQU(17203800273561677098),
+ KQU( 1382158694422087756), KQU( 5090390250309588976),
+ KQU( 517170818384213989), KQU( 1612709252627729621),
+ KQU( 1330118955572449606), KQU( 300922478056709885),
+ KQU(18115693291289091987), KQU(13491407109725238321),
+ KQU(15293714633593827320), KQU( 5151539373053314504),
+ KQU( 5951523243743139207), KQU(14459112015249527975),
+ KQU( 5456113959000700739), KQU( 3877918438464873016),
+ KQU(12534071654260163555), KQU(15871678376893555041),
+ KQU(11005484805712025549), KQU(16353066973143374252),
+ KQU( 4358331472063256685), KQU( 8268349332210859288),
+ KQU(12485161590939658075), KQU(13955993592854471343),
+ KQU( 5911446886848367039), KQU(14925834086813706974),
+ KQU( 6590362597857994805), KQU( 1280544923533661875),
+ KQU( 1637756018947988164), KQU( 4734090064512686329),
+ KQU(16693705263131485912), KQU( 6834882340494360958),
+ KQU( 8120732176159658505), KQU( 2244371958905329346),
+ KQU(10447499707729734021), KQU( 7318742361446942194),
+ KQU( 8032857516355555296), KQU(14023605983059313116),
+ KQU( 1032336061815461376), KQU( 9840995337876562612),
+ KQU( 9869256223029203587), KQU(12227975697177267636),
+ KQU(12728115115844186033), KQU( 7752058479783205470),
+ KQU( 729733219713393087), KQU(12954017801239007622)
+};
+static const uint64_t init_by_array_64_expected[] = {
+ KQU( 2100341266307895239), KQU( 8344256300489757943),
+ KQU(15687933285484243894), KQU( 8268620370277076319),
+ KQU(12371852309826545459), KQU( 8800491541730110238),
+ KQU(18113268950100835773), KQU( 2886823658884438119),
+ KQU( 3293667307248180724), KQU( 9307928143300172731),
+ KQU( 7688082017574293629), KQU( 900986224735166665),
+ KQU( 9977972710722265039), KQU( 6008205004994830552),
+ KQU( 546909104521689292), KQU( 7428471521869107594),
+ KQU(14777563419314721179), KQU(16116143076567350053),
+ KQU( 5322685342003142329), KQU( 4200427048445863473),
+ KQU( 4693092150132559146), KQU(13671425863759338582),
+ KQU( 6747117460737639916), KQU( 4732666080236551150),
+ KQU( 5912839950611941263), KQU( 3903717554504704909),
+ KQU( 2615667650256786818), KQU(10844129913887006352),
+ KQU(13786467861810997820), KQU(14267853002994021570),
+ KQU(13767807302847237439), KQU(16407963253707224617),
+ KQU( 4802498363698583497), KQU( 2523802839317209764),
+ KQU( 3822579397797475589), KQU( 8950320572212130610),
+ KQU( 3745623504978342534), KQU(16092609066068482806),
+ KQU( 9817016950274642398), KQU(10591660660323829098),
+ KQU(11751606650792815920), KQU( 5122873818577122211),
+ KQU(17209553764913936624), KQU( 6249057709284380343),
+ KQU(15088791264695071830), KQU(15344673071709851930),
+ KQU( 4345751415293646084), KQU( 2542865750703067928),
+ KQU(13520525127852368784), KQU(18294188662880997241),
+ KQU( 3871781938044881523), KQU( 2873487268122812184),
+ KQU(15099676759482679005), KQU(15442599127239350490),
+ KQU( 6311893274367710888), KQU( 3286118760484672933),
+ KQU( 4146067961333542189), KQU(13303942567897208770),
+ KQU( 8196013722255630418), KQU( 4437815439340979989),
+ KQU(15433791533450605135), KQU( 4254828956815687049),
+ KQU( 1310903207708286015), KQU(10529182764462398549),
+ KQU(14900231311660638810), KQU( 9727017277104609793),
+ KQU( 1821308310948199033), KQU(11628861435066772084),
+ KQU( 9469019138491546924), KQU( 3145812670532604988),
+ KQU( 9938468915045491919), KQU( 1562447430672662142),
+ KQU(13963995266697989134), KQU( 3356884357625028695),
+ KQU( 4499850304584309747), KQU( 8456825817023658122),
+ KQU(10859039922814285279), KQU( 8099512337972526555),
+ KQU( 348006375109672149), KQU(11919893998241688603),
+ KQU( 1104199577402948826), KQU(16689191854356060289),
+ KQU(10992552041730168078), KQU( 7243733172705465836),
+ KQU( 5668075606180319560), KQU(18182847037333286970),
+ KQU( 4290215357664631322), KQU( 4061414220791828613),
+ KQU(13006291061652989604), KQU( 7140491178917128798),
+ KQU(12703446217663283481), KQU( 5500220597564558267),
+ KQU(10330551509971296358), KQU(15958554768648714492),
+ KQU( 5174555954515360045), KQU( 1731318837687577735),
+ KQU( 3557700801048354857), KQU(13764012341928616198),
+ KQU(13115166194379119043), KQU( 7989321021560255519),
+ KQU( 2103584280905877040), KQU( 9230788662155228488),
+ KQU(16396629323325547654), KQU( 657926409811318051),
+ KQU(15046700264391400727), KQU( 5120132858771880830),
+ KQU( 7934160097989028561), KQU( 6963121488531976245),
+ KQU(17412329602621742089), KQU(15144843053931774092),
+ KQU(17204176651763054532), KQU(13166595387554065870),
+ KQU( 8590377810513960213), KQU( 5834365135373991938),
+ KQU( 7640913007182226243), KQU( 3479394703859418425),
+ KQU(16402784452644521040), KQU( 4993979809687083980),
+ KQU(13254522168097688865), KQU(15643659095244365219),
+ KQU( 5881437660538424982), KQU(11174892200618987379),
+ KQU( 254409966159711077), KQU(17158413043140549909),
+ KQU( 3638048789290376272), KQU( 1376816930299489190),
+ KQU( 4622462095217761923), KQU(15086407973010263515),
+ KQU(13253971772784692238), KQU( 5270549043541649236),
+ KQU(11182714186805411604), KQU(12283846437495577140),
+ KQU( 5297647149908953219), KQU(10047451738316836654),
+ KQU( 4938228100367874746), KQU(12328523025304077923),
+ KQU( 3601049438595312361), KQU( 9313624118352733770),
+ KQU(13322966086117661798), KQU(16660005705644029394),
+ KQU(11337677526988872373), KQU(13869299102574417795),
+ KQU(15642043183045645437), KQU( 3021755569085880019),
+ KQU( 4979741767761188161), KQU(13679979092079279587),
+ KQU( 3344685842861071743), KQU(13947960059899588104),
+ KQU( 305806934293368007), KQU( 5749173929201650029),
+ KQU(11123724852118844098), KQU(15128987688788879802),
+ KQU(15251651211024665009), KQU( 7689925933816577776),
+ KQU(16732804392695859449), KQU(17087345401014078468),
+ KQU(14315108589159048871), KQU( 4820700266619778917),
+ KQU(16709637539357958441), KQU( 4936227875177351374),
+ KQU( 2137907697912987247), KQU(11628565601408395420),
+ KQU( 2333250549241556786), KQU( 5711200379577778637),
+ KQU( 5170680131529031729), KQU(12620392043061335164),
+ KQU( 95363390101096078), KQU( 5487981914081709462),
+ KQU( 1763109823981838620), KQU( 3395861271473224396),
+ KQU( 1300496844282213595), KQU( 6894316212820232902),
+ KQU(10673859651135576674), KQU( 5911839658857903252),
+ KQU(17407110743387299102), KQU( 8257427154623140385),
+ KQU(11389003026741800267), KQU( 4070043211095013717),
+ KQU(11663806997145259025), KQU(15265598950648798210),
+ KQU( 630585789434030934), KQU( 3524446529213587334),
+ KQU( 7186424168495184211), KQU(10806585451386379021),
+ KQU(11120017753500499273), KQU( 1586837651387701301),
+ KQU(17530454400954415544), KQU( 9991670045077880430),
+ KQU( 7550997268990730180), KQU( 8640249196597379304),
+ KQU( 3522203892786893823), KQU(10401116549878854788),
+ KQU(13690285544733124852), KQU( 8295785675455774586),
+ KQU(15535716172155117603), KQU( 3112108583723722511),
+ KQU(17633179955339271113), KQU(18154208056063759375),
+ KQU( 1866409236285815666), KQU(13326075895396412882),
+ KQU( 8756261842948020025), KQU( 6281852999868439131),
+ KQU(15087653361275292858), KQU(10333923911152949397),
+ KQU( 5265567645757408500), KQU(12728041843210352184),
+ KQU( 6347959327507828759), KQU( 154112802625564758),
+ KQU(18235228308679780218), KQU( 3253805274673352418),
+ KQU( 4849171610689031197), KQU(17948529398340432518),
+ KQU(13803510475637409167), KQU(13506570190409883095),
+ KQU(15870801273282960805), KQU( 8451286481299170773),
+ KQU( 9562190620034457541), KQU( 8518905387449138364),
+ KQU(12681306401363385655), KQU( 3788073690559762558),
+ KQU( 5256820289573487769), KQU( 2752021372314875467),
+ KQU( 6354035166862520716), KQU( 4328956378309739069),
+ KQU( 449087441228269600), KQU( 5533508742653090868),
+ KQU( 1260389420404746988), KQU(18175394473289055097),
+ KQU( 1535467109660399420), KQU( 8818894282874061442),
+ KQU(12140873243824811213), KQU(15031386653823014946),
+ KQU( 1286028221456149232), KQU( 6329608889367858784),
+ KQU( 9419654354945132725), KQU( 6094576547061672379),
+ KQU(17706217251847450255), KQU( 1733495073065878126),
+ KQU(16918923754607552663), KQU( 8881949849954945044),
+ KQU(12938977706896313891), KQU(14043628638299793407),
+ KQU(18393874581723718233), KQU( 6886318534846892044),
+ KQU(14577870878038334081), KQU(13541558383439414119),
+ KQU(13570472158807588273), KQU(18300760537910283361),
+ KQU( 818368572800609205), KQU( 1417000585112573219),
+ KQU(12337533143867683655), KQU(12433180994702314480),
+ KQU( 778190005829189083), KQU(13667356216206524711),
+ KQU( 9866149895295225230), KQU(11043240490417111999),
+ KQU( 1123933826541378598), KQU( 6469631933605123610),
+ KQU(14508554074431980040), KQU(13918931242962026714),
+ KQU( 2870785929342348285), KQU(14786362626740736974),
+ KQU(13176680060902695786), KQU( 9591778613541679456),
+ KQU( 9097662885117436706), KQU( 749262234240924947),
+ KQU( 1944844067793307093), KQU( 4339214904577487742),
+ KQU( 8009584152961946551), KQU(16073159501225501777),
+ KQU( 3335870590499306217), KQU(17088312653151202847),
+ KQU( 3108893142681931848), KQU(16636841767202792021),
+ KQU(10423316431118400637), KQU( 8008357368674443506),
+ KQU(11340015231914677875), KQU(17687896501594936090),
+ KQU(15173627921763199958), KQU( 542569482243721959),
+ KQU(15071714982769812975), KQU( 4466624872151386956),
+ KQU( 1901780715602332461), KQU( 9822227742154351098),
+ KQU( 1479332892928648780), KQU( 6981611948382474400),
+ KQU( 7620824924456077376), KQU(14095973329429406782),
+ KQU( 7902744005696185404), KQU(15830577219375036920),
+ KQU(10287076667317764416), KQU(12334872764071724025),
+ KQU( 4419302088133544331), KQU(14455842851266090520),
+ KQU(12488077416504654222), KQU( 7953892017701886766),
+ KQU( 6331484925529519007), KQU( 4902145853785030022),
+ KQU(17010159216096443073), KQU(11945354668653886087),
+ KQU(15112022728645230829), KQU(17363484484522986742),
+ KQU( 4423497825896692887), KQU( 8155489510809067471),
+ KQU( 258966605622576285), KQU( 5462958075742020534),
+ KQU( 6763710214913276228), KQU( 2368935183451109054),
+ KQU(14209506165246453811), KQU( 2646257040978514881),
+ KQU( 3776001911922207672), KQU( 1419304601390147631),
+ KQU(14987366598022458284), KQU( 3977770701065815721),
+ KQU( 730820417451838898), KQU( 3982991703612885327),
+ KQU( 2803544519671388477), KQU(17067667221114424649),
+ KQU( 2922555119737867166), KQU( 1989477584121460932),
+ KQU(15020387605892337354), KQU( 9293277796427533547),
+ KQU(10722181424063557247), KQU(16704542332047511651),
+ KQU( 5008286236142089514), KQU(16174732308747382540),
+ KQU(17597019485798338402), KQU(13081745199110622093),
+ KQU( 8850305883842258115), KQU(12723629125624589005),
+ KQU( 8140566453402805978), KQU(15356684607680935061),
+ KQU(14222190387342648650), KQU(11134610460665975178),
+ KQU( 1259799058620984266), KQU(13281656268025610041),
+ KQU( 298262561068153992), KQU(12277871700239212922),
+ KQU(13911297774719779438), KQU(16556727962761474934),
+ KQU(17903010316654728010), KQU( 9682617699648434744),
+ KQU(14757681836838592850), KQU( 1327242446558524473),
+ KQU(11126645098780572792), KQU( 1883602329313221774),
+ KQU( 2543897783922776873), KQU(15029168513767772842),
+ KQU(12710270651039129878), KQU(16118202956069604504),
+ KQU(15010759372168680524), KQU( 2296827082251923948),
+ KQU(10793729742623518101), KQU(13829764151845413046),
+ KQU(17769301223184451213), KQU( 3118268169210783372),
+ KQU(17626204544105123127), KQU( 7416718488974352644),
+ KQU(10450751996212925994), KQU( 9352529519128770586),
+ KQU( 259347569641110140), KQU( 8048588892269692697),
+ KQU( 1774414152306494058), KQU(10669548347214355622),
+ KQU(13061992253816795081), KQU(18432677803063861659),
+ KQU( 8879191055593984333), KQU(12433753195199268041),
+ KQU(14919392415439730602), KQU( 6612848378595332963),
+ KQU( 6320986812036143628), KQU(10465592420226092859),
+ KQU( 4196009278962570808), KQU( 3747816564473572224),
+ KQU(17941203486133732898), KQU( 2350310037040505198),
+ KQU( 5811779859134370113), KQU(10492109599506195126),
+ KQU( 7699650690179541274), KQU( 1954338494306022961),
+ KQU(14095816969027231152), KQU( 5841346919964852061),
+ KQU(14945969510148214735), KQU( 3680200305887550992),
+ KQU( 6218047466131695792), KQU( 8242165745175775096),
+ KQU(11021371934053307357), KQU( 1265099502753169797),
+ KQU( 4644347436111321718), KQU( 3609296916782832859),
+ KQU( 8109807992218521571), KQU(18387884215648662020),
+ KQU(14656324896296392902), KQU(17386819091238216751),
+ KQU(17788300878582317152), KQU( 7919446259742399591),
+ KQU( 4466613134576358004), KQU(12928181023667938509),
+ KQU(13147446154454932030), KQU(16552129038252734620),
+ KQU( 8395299403738822450), KQU(11313817655275361164),
+ KQU( 434258809499511718), KQU( 2074882104954788676),
+ KQU( 7929892178759395518), KQU( 9006461629105745388),
+ KQU( 5176475650000323086), KQU(11128357033468341069),
+ KQU(12026158851559118955), KQU(14699716249471156500),
+ KQU( 448982497120206757), KQU( 4156475356685519900),
+ KQU( 6063816103417215727), KQU(10073289387954971479),
+ KQU( 8174466846138590962), KQU( 2675777452363449006),
+ KQU( 9090685420572474281), KQU( 6659652652765562060),
+ KQU(12923120304018106621), KQU(11117480560334526775),
+ KQU( 937910473424587511), KQU( 1838692113502346645),
+ KQU(11133914074648726180), KQU( 7922600945143884053),
+ KQU(13435287702700959550), KQU( 5287964921251123332),
+ KQU(11354875374575318947), KQU(17955724760748238133),
+ KQU(13728617396297106512), KQU( 4107449660118101255),
+ KQU( 1210269794886589623), KQU(11408687205733456282),
+ KQU( 4538354710392677887), KQU(13566803319341319267),
+ KQU(17870798107734050771), KQU( 3354318982568089135),
+ KQU( 9034450839405133651), KQU(13087431795753424314),
+ KQU( 950333102820688239), KQU( 1968360654535604116),
+ KQU(16840551645563314995), KQU( 8867501803892924995),
+ KQU(11395388644490626845), KQU( 1529815836300732204),
+ KQU(13330848522996608842), KQU( 1813432878817504265),
+ KQU( 2336867432693429560), KQU(15192805445973385902),
+ KQU( 2528593071076407877), KQU( 128459777936689248),
+ KQU( 9976345382867214866), KQU( 6208885766767996043),
+ KQU(14982349522273141706), KQU( 3099654362410737822),
+ KQU(13776700761947297661), KQU( 8806185470684925550),
+ KQU( 8151717890410585321), KQU( 640860591588072925),
+ KQU(14592096303937307465), KQU( 9056472419613564846),
+ KQU(14861544647742266352), KQU(12703771500398470216),
+ KQU( 3142372800384138465), KQU( 6201105606917248196),
+ KQU(18337516409359270184), KQU(15042268695665115339),
+ KQU(15188246541383283846), KQU(12800028693090114519),
+ KQU( 5992859621101493472), KQU(18278043971816803521),
+ KQU( 9002773075219424560), KQU( 7325707116943598353),
+ KQU( 7930571931248040822), KQU( 5645275869617023448),
+ KQU( 7266107455295958487), KQU( 4363664528273524411),
+ KQU(14313875763787479809), KQU(17059695613553486802),
+ KQU( 9247761425889940932), KQU(13704726459237593128),
+ KQU( 2701312427328909832), KQU(17235532008287243115),
+ KQU(14093147761491729538), KQU( 6247352273768386516),
+ KQU( 8268710048153268415), KQU( 7985295214477182083),
+ KQU(15624495190888896807), KQU( 3772753430045262788),
+ KQU( 9133991620474991698), KQU( 5665791943316256028),
+ KQU( 7551996832462193473), KQU(13163729206798953877),
+ KQU( 9263532074153846374), KQU( 1015460703698618353),
+ KQU(17929874696989519390), KQU(18257884721466153847),
+ KQU(16271867543011222991), KQU( 3905971519021791941),
+ KQU(16814488397137052085), KQU( 1321197685504621613),
+ KQU( 2870359191894002181), KQU(14317282970323395450),
+ KQU(13663920845511074366), KQU( 2052463995796539594),
+ KQU(14126345686431444337), KQU( 1727572121947022534),
+ KQU(17793552254485594241), KQU( 6738857418849205750),
+ KQU( 1282987123157442952), KQU(16655480021581159251),
+ KQU( 6784587032080183866), KQU(14726758805359965162),
+ KQU( 7577995933961987349), KQU(12539609320311114036),
+ KQU(10789773033385439494), KQU( 8517001497411158227),
+ KQU(10075543932136339710), KQU(14838152340938811081),
+ KQU( 9560840631794044194), KQU(17445736541454117475),
+ KQU(10633026464336393186), KQU(15705729708242246293),
+ KQU( 1117517596891411098), KQU( 4305657943415886942),
+ KQU( 4948856840533979263), KQU(16071681989041789593),
+ KQU(13723031429272486527), KQU( 7639567622306509462),
+ KQU(12670424537483090390), KQU( 9715223453097197134),
+ KQU( 5457173389992686394), KQU( 289857129276135145),
+ KQU(17048610270521972512), KQU( 692768013309835485),
+ KQU(14823232360546632057), KQU(18218002361317895936),
+ KQU( 3281724260212650204), KQU(16453957266549513795),
+ KQU( 8592711109774511881), KQU( 929825123473369579),
+ KQU(15966784769764367791), KQU( 9627344291450607588),
+ KQU(10849555504977813287), KQU( 9234566913936339275),
+ KQU( 6413807690366911210), KQU(10862389016184219267),
+ KQU(13842504799335374048), KQU( 1531994113376881174),
+ KQU( 2081314867544364459), KQU(16430628791616959932),
+ KQU( 8314714038654394368), KQU( 9155473892098431813),
+ KQU(12577843786670475704), KQU( 4399161106452401017),
+ KQU( 1668083091682623186), KQU( 1741383777203714216),
+ KQU( 2162597285417794374), KQU(15841980159165218736),
+ KQU( 1971354603551467079), KQU( 1206714764913205968),
+ KQU( 4790860439591272330), KQU(14699375615594055799),
+ KQU( 8374423871657449988), KQU(10950685736472937738),
+ KQU( 697344331343267176), KQU(10084998763118059810),
+ KQU(12897369539795983124), KQU(12351260292144383605),
+ KQU( 1268810970176811234), KQU( 7406287800414582768),
+ KQU( 516169557043807831), KQU( 5077568278710520380),
+ KQU( 3828791738309039304), KQU( 7721974069946943610),
+ KQU( 3534670260981096460), KQU( 4865792189600584891),
+ KQU(16892578493734337298), KQU( 9161499464278042590),
+ KQU(11976149624067055931), KQU(13219479887277343990),
+ KQU(14161556738111500680), KQU(14670715255011223056),
+ KQU( 4671205678403576558), KQU(12633022931454259781),
+ KQU(14821376219869187646), KQU( 751181776484317028),
+ KQU( 2192211308839047070), KQU(11787306362361245189),
+ KQU(10672375120744095707), KQU( 4601972328345244467),
+ KQU(15457217788831125879), KQU( 8464345256775460809),
+ KQU(10191938789487159478), KQU( 6184348739615197613),
+ KQU(11425436778806882100), KQU( 2739227089124319793),
+ KQU( 461464518456000551), KQU( 4689850170029177442),
+ KQU( 6120307814374078625), KQU(11153579230681708671),
+ KQU( 7891721473905347926), KQU(10281646937824872400),
+ KQU( 3026099648191332248), KQU( 8666750296953273818),
+ KQU(14978499698844363232), KQU(13303395102890132065),
+ KQU( 8182358205292864080), KQU(10560547713972971291),
+ KQU(11981635489418959093), KQU( 3134621354935288409),
+ KQU(11580681977404383968), KQU(14205530317404088650),
+ KQU( 5997789011854923157), KQU(13659151593432238041),
+ KQU(11664332114338865086), KQU( 7490351383220929386),
+ KQU( 7189290499881530378), KQU(15039262734271020220),
+ KQU( 2057217285976980055), KQU( 555570804905355739),
+ KQU(11235311968348555110), KQU(13824557146269603217),
+ KQU(16906788840653099693), KQU( 7222878245455661677),
+ KQU( 5245139444332423756), KQU( 4723748462805674292),
+ KQU(12216509815698568612), KQU(17402362976648951187),
+ KQU(17389614836810366768), KQU( 4880936484146667711),
+ KQU( 9085007839292639880), KQU(13837353458498535449),
+ KQU(11914419854360366677), KQU(16595890135313864103),
+ KQU( 6313969847197627222), KQU(18296909792163910431),
+ KQU(10041780113382084042), KQU( 2499478551172884794),
+ KQU(11057894246241189489), KQU( 9742243032389068555),
+ KQU(12838934582673196228), KQU(13437023235248490367),
+ KQU(13372420669446163240), KQU( 6752564244716909224),
+ KQU( 7157333073400313737), KQU(12230281516370654308),
+ KQU( 1182884552219419117), KQU( 2955125381312499218),
+ KQU(10308827097079443249), KQU( 1337648572986534958),
+ KQU(16378788590020343939), KQU( 108619126514420935),
+ KQU( 3990981009621629188), KQU( 5460953070230946410),
+ KQU( 9703328329366531883), KQU(13166631489188077236),
+ KQU( 1104768831213675170), KQU( 3447930458553877908),
+ KQU( 8067172487769945676), KQU( 5445802098190775347),
+ KQU( 3244840981648973873), KQU(17314668322981950060),
+ KQU( 5006812527827763807), KQU(18158695070225526260),
+ KQU( 2824536478852417853), KQU(13974775809127519886),
+ KQU( 9814362769074067392), KQU(17276205156374862128),
+ KQU(11361680725379306967), KQU( 3422581970382012542),
+ KQU(11003189603753241266), KQU(11194292945277862261),
+ KQU( 6839623313908521348), KQU(11935326462707324634),
+ KQU( 1611456788685878444), KQU(13112620989475558907),
+ KQU( 517659108904450427), KQU(13558114318574407624),
+ KQU(15699089742731633077), KQU( 4988979278862685458),
+ KQU( 8111373583056521297), KQU( 3891258746615399627),
+ KQU( 8137298251469718086), KQU(12748663295624701649),
+ KQU( 4389835683495292062), KQU( 5775217872128831729),
+ KQU( 9462091896405534927), KQU( 8498124108820263989),
+ KQU( 8059131278842839525), KQU(10503167994254090892),
+ KQU(11613153541070396656), KQU(18069248738504647790),
+ KQU( 570657419109768508), KQU( 3950574167771159665),
+ KQU( 5514655599604313077), KQU( 2908460854428484165),
+ KQU(10777722615935663114), KQU(12007363304839279486),
+ KQU( 9800646187569484767), KQU( 8795423564889864287),
+ KQU(14257396680131028419), KQU( 6405465117315096498),
+ KQU( 7939411072208774878), KQU(17577572378528990006),
+ KQU(14785873806715994850), KQU(16770572680854747390),
+ KQU(18127549474419396481), KQU(11637013449455757750),
+ KQU(14371851933996761086), KQU( 3601181063650110280),
+ KQU( 4126442845019316144), KQU(10198287239244320669),
+ KQU(18000169628555379659), KQU(18392482400739978269),
+ KQU( 6219919037686919957), KQU( 3610085377719446052),
+ KQU( 2513925039981776336), KQU(16679413537926716955),
+ KQU(12903302131714909434), KQU( 5581145789762985009),
+ KQU(12325955044293303233), KQU(17216111180742141204),
+ KQU( 6321919595276545740), KQU( 3507521147216174501),
+ KQU( 9659194593319481840), KQU(11473976005975358326),
+ KQU(14742730101435987026), KQU( 492845897709954780),
+ KQU(16976371186162599676), KQU(17712703422837648655),
+ KQU( 9881254778587061697), KQU( 8413223156302299551),
+ KQU( 1563841828254089168), KQU( 9996032758786671975),
+ KQU( 138877700583772667), KQU(13003043368574995989),
+ KQU( 4390573668650456587), KQU( 8610287390568126755),
+ KQU(15126904974266642199), KQU( 6703637238986057662),
+ KQU( 2873075592956810157), KQU( 6035080933946049418),
+ KQU(13382846581202353014), KQU( 7303971031814642463),
+ KQU(18418024405307444267), KQU( 5847096731675404647),
+ KQU( 4035880699639842500), KQU(11525348625112218478),
+ KQU( 3041162365459574102), KQU( 2604734487727986558),
+ KQU(15526341771636983145), KQU(14556052310697370254),
+ KQU(12997787077930808155), KQU( 9601806501755554499),
+ KQU(11349677952521423389), KQU(14956777807644899350),
+ KQU(16559736957742852721), KQU(12360828274778140726),
+ KQU( 6685373272009662513), KQU(16932258748055324130),
+ KQU(15918051131954158508), KQU( 1692312913140790144),
+ KQU( 546653826801637367), KQU( 5341587076045986652),
+ KQU(14975057236342585662), KQU(12374976357340622412),
+ KQU(10328833995181940552), KQU(12831807101710443149),
+ KQU(10548514914382545716), KQU( 2217806727199715993),
+ KQU(12627067369242845138), KQU( 4598965364035438158),
+ KQU( 150923352751318171), KQU(14274109544442257283),
+ KQU( 4696661475093863031), KQU( 1505764114384654516),
+ KQU(10699185831891495147), KQU( 2392353847713620519),
+ KQU( 3652870166711788383), KQU( 8640653276221911108),
+ KQU( 3894077592275889704), KQU( 4918592872135964845),
+ KQU(16379121273281400789), KQU(12058465483591683656),
+ KQU(11250106829302924945), KQU( 1147537556296983005),
+ KQU( 6376342756004613268), KQU(14967128191709280506),
+ KQU(18007449949790627628), KQU( 9497178279316537841),
+ KQU( 7920174844809394893), KQU(10037752595255719907),
+ KQU(15875342784985217697), KQU(15311615921712850696),
+ KQU( 9552902652110992950), KQU(14054979450099721140),
+ KQU( 5998709773566417349), KQU(18027910339276320187),
+ KQU( 8223099053868585554), KQU( 7842270354824999767),
+ KQU( 4896315688770080292), KQU(12969320296569787895),
+ KQU( 2674321489185759961), KQU( 4053615936864718439),
+ KQU(11349775270588617578), KQU( 4743019256284553975),
+ KQU( 5602100217469723769), KQU(14398995691411527813),
+ KQU( 7412170493796825470), KQU( 836262406131744846),
+ KQU( 8231086633845153022), KQU( 5161377920438552287),
+ KQU( 8828731196169924949), KQU(16211142246465502680),
+ KQU( 3307990879253687818), KQU( 5193405406899782022),
+ KQU( 8510842117467566693), KQU( 6070955181022405365),
+ KQU(14482950231361409799), KQU(12585159371331138077),
+ KQU( 3511537678933588148), KQU( 2041849474531116417),
+ KQU(10944936685095345792), KQU(18303116923079107729),
+ KQU( 2720566371239725320), KQU( 4958672473562397622),
+ KQU( 3032326668253243412), KQU(13689418691726908338),
+ KQU( 1895205511728843996), KQU( 8146303515271990527),
+ KQU(16507343500056113480), KQU( 473996939105902919),
+ KQU( 9897686885246881481), KQU(14606433762712790575),
+ KQU( 6732796251605566368), KQU( 1399778120855368916),
+ KQU( 935023885182833777), KQU(16066282816186753477),
+ KQU( 7291270991820612055), KQU(17530230393129853844),
+ KQU(10223493623477451366), KQU(15841725630495676683),
+ KQU(17379567246435515824), KQU( 8588251429375561971),
+ KQU(18339511210887206423), KQU(17349587430725976100),
+ KQU(12244876521394838088), KQU( 6382187714147161259),
+ KQU(12335807181848950831), KQU(16948885622305460665),
+ KQU(13755097796371520506), KQU(14806740373324947801),
+ KQU( 4828699633859287703), KQU( 8209879281452301604),
+ KQU(12435716669553736437), KQU(13970976859588452131),
+ KQU( 6233960842566773148), KQU(12507096267900505759),
+ KQU( 1198713114381279421), KQU(14989862731124149015),
+ KQU(15932189508707978949), KQU( 2526406641432708722),
+ KQU( 29187427817271982), KQU( 1499802773054556353),
+ KQU(10816638187021897173), KQU( 5436139270839738132),
+ KQU( 6659882287036010082), KQU( 2154048955317173697),
+ KQU(10887317019333757642), KQU(16281091802634424955),
+ KQU(10754549879915384901), KQU(10760611745769249815),
+ KQU( 2161505946972504002), KQU( 5243132808986265107),
+ KQU(10129852179873415416), KQU( 710339480008649081),
+ KQU( 7802129453068808528), KQU(17967213567178907213),
+ KQU(15730859124668605599), KQU(13058356168962376502),
+ KQU( 3701224985413645909), KQU(14464065869149109264),
+ KQU( 9959272418844311646), KQU(10157426099515958752),
+ KQU(14013736814538268528), KQU(17797456992065653951),
+ KQU(17418878140257344806), KQU(15457429073540561521),
+ KQU( 2184426881360949378), KQU( 2062193041154712416),
+ KQU( 8553463347406931661), KQU( 4913057625202871854),
+ KQU( 2668943682126618425), KQU(17064444737891172288),
+ KQU( 4997115903913298637), KQU(12019402608892327416),
+ KQU(17603584559765897352), KQU(11367529582073647975),
+ KQU( 8211476043518436050), KQU( 8676849804070323674),
+ KQU(18431829230394475730), KQU(10490177861361247904),
+ KQU( 9508720602025651349), KQU( 7409627448555722700),
+ KQU( 5804047018862729008), KQU(11943858176893142594),
+ KQU(11908095418933847092), KQU( 5415449345715887652),
+ KQU( 1554022699166156407), KQU( 9073322106406017161),
+ KQU( 7080630967969047082), KQU(18049736940860732943),
+ KQU(12748714242594196794), KQU( 1226992415735156741),
+ KQU(17900981019609531193), KQU(11720739744008710999),
+ KQU( 3006400683394775434), KQU(11347974011751996028),
+ KQU( 3316999628257954608), KQU( 8384484563557639101),
+ KQU(18117794685961729767), KQU( 1900145025596618194),
+ KQU(17459527840632892676), KQU( 5634784101865710994),
+ KQU( 7918619300292897158), KQU( 3146577625026301350),
+ KQU( 9955212856499068767), KQU( 1873995843681746975),
+ KQU( 1561487759967972194), KQU( 8322718804375878474),
+ KQU(11300284215327028366), KQU( 4667391032508998982),
+ KQU( 9820104494306625580), KQU(17922397968599970610),
+ KQU( 1784690461886786712), KQU(14940365084341346821),
+ KQU( 5348719575594186181), KQU(10720419084507855261),
+ KQU(14210394354145143274), KQU( 2426468692164000131),
+ KQU(16271062114607059202), KQU(14851904092357070247),
+ KQU( 6524493015693121897), KQU( 9825473835127138531),
+ KQU(14222500616268569578), KQU(15521484052007487468),
+ KQU(14462579404124614699), KQU(11012375590820665520),
+ KQU(11625327350536084927), KQU(14452017765243785417),
+ KQU( 9989342263518766305), KQU( 3640105471101803790),
+ KQU( 4749866455897513242), KQU(13963064946736312044),
+ KQU(10007416591973223791), KQU(18314132234717431115),
+ KQU( 3286596588617483450), KQU( 7726163455370818765),
+ KQU( 7575454721115379328), KQU( 5308331576437663422),
+ KQU(18288821894903530934), KQU( 8028405805410554106),
+ KQU(15744019832103296628), KQU( 149765559630932100),
+ KQU( 6137705557200071977), KQU(14513416315434803615),
+ KQU(11665702820128984473), KQU( 218926670505601386),
+ KQU( 6868675028717769519), KQU(15282016569441512302),
+ KQU( 5707000497782960236), KQU( 6671120586555079567),
+ KQU( 2194098052618985448), KQU(16849577895477330978),
+ KQU(12957148471017466283), KQU( 1997805535404859393),
+ KQU( 1180721060263860490), KQU(13206391310193756958),
+ KQU(12980208674461861797), KQU( 3825967775058875366),
+ KQU(17543433670782042631), KQU( 1518339070120322730),
+ KQU(16344584340890991669), KQU( 2611327165318529819),
+ KQU(11265022723283422529), KQU( 4001552800373196817),
+ KQU(14509595890079346161), KQU( 3528717165416234562),
+ KQU(18153222571501914072), KQU( 9387182977209744425),
+ KQU(10064342315985580021), KQU(11373678413215253977),
+ KQU( 2308457853228798099), KQU( 9729042942839545302),
+ KQU( 7833785471140127746), KQU( 6351049900319844436),
+ KQU(14454610627133496067), KQU(12533175683634819111),
+ KQU(15570163926716513029), KQU(13356980519185762498)
+};
+
+/*
+ * 32-bit output check: for a fixed seed, fill_array32() (block generation)
+ * and gen_rand32() (one value at a time) must produce the same stream, and
+ * the first COUNT_1 values must match the precomputed
+ * init_gen_rand_32_expected[] table.
+ */
+TEST_BEGIN(test_gen_rand_32) {
+	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+	int i;
+	uint32_t r32;
+	sfmt_t *ctx;
+
+	assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+	    "Array size too small");
+	/* First pass: block-fill two arrays from seed 1234. */
+	ctx = init_gen_rand(1234);
+	fill_array32(ctx, array32, BLOCK_SIZE);
+	fill_array32(ctx, array32_2, BLOCK_SIZE);
+	fini_gen_rand(ctx);
+
+	/* Second pass: re-seed identically and compare value by value. */
+	ctx = init_gen_rand(1234);
+	for (i = 0; i < BLOCK_SIZE; i++) {
+		if (i < COUNT_1) {
+			assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
+			    "Output mismatch for i=%d", i);
+		}
+		r32 = gen_rand32(ctx);
+		assert_u32_eq(r32, array32[i],
+		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+	}
+	/* The second block must also match continued scalar generation. */
+	for (i = 0; i < COUNT_2; i++) {
+		r32 = gen_rand32(ctx);
+		assert_u32_eq(r32, array32_2[i],
+		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+		    r32);
+	}
+	fini_gen_rand(ctx);
+}
+TEST_END
+
+/*
+ * Same stream-equivalence check as test_gen_rand_32, but seeding via
+ * init_by_array() with a 4-word key; expected values come from
+ * init_by_array_32_expected[].
+ */
+TEST_BEGIN(test_by_array_32) {
+	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+	int i;
+	uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
+	uint32_t r32;
+	sfmt_t *ctx;
+
+	assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+	    "Array size too small");
+	/* Block-fill two arrays from the array-based seed. */
+	ctx = init_by_array(ini, 4);
+	fill_array32(ctx, array32, BLOCK_SIZE);
+	fill_array32(ctx, array32_2, BLOCK_SIZE);
+	fini_gen_rand(ctx);
+
+	/* Re-seed identically and compare scalar generation. */
+	ctx = init_by_array(ini, 4);
+	for (i = 0; i < BLOCK_SIZE; i++) {
+		if (i < COUNT_1) {
+			assert_u32_eq(array32[i], init_by_array_32_expected[i],
+			    "Output mismatch for i=%d", i);
+		}
+		r32 = gen_rand32(ctx);
+		assert_u32_eq(r32, array32[i],
+		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+	}
+	for (i = 0; i < COUNT_2; i++) {
+		r32 = gen_rand32(ctx);
+		assert_u32_eq(r32, array32_2[i],
+		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+		    r32);
+	}
+	fini_gen_rand(ctx);
+}
+TEST_END
+
+/*
+ * 64-bit counterpart of test_gen_rand_32: fill_array64() and gen_rand64()
+ * must agree for a fixed seed, and the first COUNT_1 values must match
+ * init_gen_rand_64_expected[].
+ */
+TEST_BEGIN(test_gen_rand_64) {
+	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+	int i;
+	uint64_t r;
+	sfmt_t *ctx;
+
+	assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+	    "Array size too small");
+	/* Block-fill two arrays from seed 4321. */
+	ctx = init_gen_rand(4321);
+	fill_array64(ctx, array64, BLOCK_SIZE64);
+	fill_array64(ctx, array64_2, BLOCK_SIZE64);
+	fini_gen_rand(ctx);
+
+	/* Re-seed identically and compare scalar generation. */
+	ctx = init_gen_rand(4321);
+	for (i = 0; i < BLOCK_SIZE64; i++) {
+		if (i < COUNT_1) {
+			assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
+			    "Output mismatch for i=%d", i);
+		}
+		r = gen_rand64(ctx);
+		assert_u64_eq(r, array64[i],
+		    "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
+		    array64[i], r);
+	}
+	for (i = 0; i < COUNT_2; i++) {
+		r = gen_rand64(ctx);
+		assert_u64_eq(r, array64_2[i],
+		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
+		    array64_2[i], r);
+	}
+	fini_gen_rand(ctx);
+}
+TEST_END
+
+/*
+ * 64-bit counterpart of test_by_array_32: array-based seeding (5-word key),
+ * expected values from init_by_array_64_expected[].
+ */
+TEST_BEGIN(test_by_array_64) {
+	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+	int i;
+	uint64_t r;
+	uint32_t ini[] = {5, 4, 3, 2, 1};
+	sfmt_t *ctx;
+
+	assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+	    "Array size too small");
+	/* Block-fill two arrays from the array-based seed. */
+	ctx = init_by_array(ini, 5);
+	fill_array64(ctx, array64, BLOCK_SIZE64);
+	fill_array64(ctx, array64_2, BLOCK_SIZE64);
+	fini_gen_rand(ctx);
+
+	/* Re-seed identically and compare scalar generation. */
+	ctx = init_by_array(ini, 5);
+	for (i = 0; i < BLOCK_SIZE64; i++) {
+		if (i < COUNT_1) {
+			assert_u64_eq(array64[i], init_by_array_64_expected[i],
+			    "Output mismatch for i=%d", i);
+		}
+		r = gen_rand64(ctx);
+		assert_u64_eq(r, array64[i],
+		    "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
+		    array64[i], r);
+	}
+	for (i = 0; i < COUNT_2; i++) {
+		r = gen_rand64(ctx);
+		assert_u64_eq(r, array64_2[i],
+		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
+		    array64_2[i], r);
+	}
+	fini_gen_rand(ctx);
+}
+TEST_END
+
+/* Test driver: run all four SFMT stream-consistency tests. */
+int
+main(void) {
+	return test(
+	    test_gen_rand_32,
+	    test_by_array_32,
+	    test_gen_rand_64,
+	    test_by_array_64);
+}
diff --git a/deps/jemalloc/test/unit/a0.c b/deps/jemalloc/test/unit/a0.c
new file mode 100644
index 0000000..a27ab3f
--- /dev/null
+++ b/deps/jemalloc/test/unit/a0.c
@@ -0,0 +1,16 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Smoke test for the bootstrap ("arena 0") allocator: a 1-byte a0malloc()
+ * must succeed and be freeable via a0dalloc().
+ */
+TEST_BEGIN(test_a0) {
+	void *p;
+
+	p = a0malloc(1);
+	assert_ptr_not_null(p, "Unexpected a0malloc() error");
+	a0dalloc(p);
+}
+TEST_END
+
+/*
+ * Run without triggering full malloc initialization, since a0 must work
+ * before the allocator is bootstrapped.
+ */
+int
+main(void) {
+	return test_no_malloc_init(
+	    test_a0);
+}
diff --git a/deps/jemalloc/test/unit/arena_reset.c b/deps/jemalloc/test/unit/arena_reset.c
new file mode 100644
index 0000000..b182f31
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset.c
@@ -0,0 +1,349 @@
+#ifndef ARENA_RESET_PROF_C_
+#include "test/jemalloc_test.h"
+#endif
+
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/rtree.h"
+
+#include "test/extent_hooks.h"
+
+/* Read an unsigned count from the given mallctl name (e.g. "arenas.nbins"). */
+static unsigned
+get_nsizes_impl(const char *cmd) {
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return ret;
+}
+
+/* Number of small size classes (bins). */
+static unsigned
+get_nsmall(void) {
+	return get_nsizes_impl("arenas.nbins");
+}
+
+/* Number of large size classes (large extents). */
+static unsigned
+get_nlarge(void) {
+	return get_nsizes_impl("arenas.nlextents");
+}
+
+/*
+ * Look up the size of size class `ind` via a MIB template such as
+ * "arenas.bin.0.size": translate the name to a MIB once, then patch the
+ * class index into mib[2] before the by-MIB read.
+ */
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+	return ret;
+}
+
+/* Size in bytes of small size class `ind`. */
+static size_t
+get_small_size(size_t ind) {
+	return get_size_impl("arenas.bin.0.size", ind);
+}
+
+/* Size in bytes of large size class `ind`. */
+static size_t
+get_large_size(size_t ind) {
+	return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/* Like ivsalloc(), but safe to call on discarded allocations. */
+static size_t
+vsalloc(tsdn_t *tsdn, const void *ptr) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	extent_t *extent;
+	szind_t szind;
+	/* Returns 0 (not a live allocation) on rtree lookup failure... */
+	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
+	    (uintptr_t)ptr, false, &extent, &szind)) {
+		return 0;
+	}
+
+	/* ...or if no extent maps the address... */
+	if (extent == NULL) {
+		return 0;
+	}
+	/* ...or if the extent has been retired/discarded... */
+	if (extent_state_get(extent) != extent_state_active) {
+		return 0;
+	}
+
+	/* ...or if the slot carries no valid size class. */
+	if (szind == SC_NSIZES) {
+		return 0;
+	}
+
+	return sz_index2size(szind);
+}
+
+/*
+ * Create a new arena, optionally with custom extent hooks `h` (NULL means
+ * default hooks).  Returns the new arena index.
+ */
+static unsigned
+do_arena_create(extent_hooks_t *h) {
+	unsigned arena_ind;
+	size_t sz = sizeof(unsigned);
+	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
+	    "Unexpected mallctl() failure");
+	return arena_ind;
+}
+
+/*
+ * Populate an arena with one allocation per small size class plus up to
+ * NLARGE large size classes, bypassing the tcache so everything lands in
+ * `arena_ind`.  On return, *ptrs (caller frees via do_arena_reset_post())
+ * holds *nptrs live pointers, each verified to have a queryable size.
+ */
+static void
+do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
+#define NLARGE 32
+	unsigned nsmall, nlarge, i;
+	size_t sz;
+	int flags;
+	tsdn_t *tsdn;
+
+	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+	nsmall = get_nsmall();
+	nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
+	*nptrs = nsmall + nlarge;
+	*ptrs = (void **)malloc(*nptrs * sizeof(void *));
+	assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");
+
+	/* Allocate objects with a wide range of sizes. */
+	for (i = 0; i < nsmall; i++) {
+		sz = get_small_size(i);
+		(*ptrs)[i] = mallocx(sz, flags);
+		assert_ptr_not_null((*ptrs)[i],
+		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+	}
+	for (i = 0; i < nlarge; i++) {
+		sz = get_large_size(i);
+		(*ptrs)[nsmall + i] = mallocx(sz, flags);
+		/*
+		 * Fix: assert the pointer just stored at [nsmall + i].  The
+		 * original asserted (*ptrs)[i], which re-checked a small
+		 * allocation and let a failed large mallocx() go unnoticed
+		 * until the ivsalloc() loop below dereferenced NULL.
+		 */
+		assert_ptr_not_null((*ptrs)[nsmall + i],
+		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
+	}
+
+	tsdn = tsdn_fetch();
+
+	/* Verify allocations. */
+	for (i = 0; i < *nptrs; i++) {
+		assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
+		    "Allocation should have queryable size");
+	}
+}
+
+/*
+ * After a reset/destroy, verify that none of the `nptrs` pointers in `ptrs`
+ * still map to an active allocation, then free the pointer array itself.
+ * The background-thread mutex for `arena_ind` is held around the check so a
+ * concurrent background purge cannot race the rtree lookups.
+ */
+static void
+do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
+	tsdn_t *tsdn;
+	unsigned i;
+
+	tsdn = tsdn_fetch();
+
+	if (have_background_thread) {
+		malloc_mutex_lock(tsdn,
+		    &background_thread_info_get(arena_ind)->mtx);
+	}
+	/* Verify allocations no longer exist. */
+	for (i = 0; i < nptrs; i++) {
+		assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
+		    "Allocation should no longer exist");
+	}
+	if (have_background_thread) {
+		malloc_mutex_unlock(tsdn,
+		    &background_thread_info_get(arena_ind)->mtx);
+	}
+
+	free(ptrs);
+}
+
+/*
+ * Invoke an "arena.0.<op>" mallctl (reset or destroy) against a specific
+ * arena by patching `arena_ind` into mib[1].
+ */
+static void
+do_arena_reset_destroy(const char *name, unsigned arena_ind) {
+	size_t mib[3];
+	size_t miblen;
+
+	miblen = sizeof(mib)/sizeof(size_t);
+	assert_d_eq(mallctlnametomib(name, mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() failure");
+	mib[1] = (size_t)arena_ind;
+	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+	    "Unexpected mallctlbymib() failure");
+}
+
+/* Discard all of an arena's allocations without destroying the arena. */
+static void
+do_arena_reset(unsigned arena_ind) {
+	do_arena_reset_destroy("arena.0.reset", arena_ind);
+}
+
+/* Destroy an arena entirely, making its index available for reuse. */
+static void
+do_arena_destroy(unsigned arena_ind) {
+	do_arena_reset_destroy("arena.0.destroy", arena_ind);
+}
+
+/*
+ * arena.<i>.reset must discard every allocation in the arena: populate,
+ * reset, then confirm none of the pointers map to a live allocation.
+ */
+TEST_BEGIN(test_arena_reset) {
+	unsigned arena_ind;
+	void **ptrs;
+	unsigned nptrs;
+
+	arena_ind = do_arena_create(NULL);
+	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+	do_arena_reset(arena_ind);
+	do_arena_reset_post(ptrs, nptrs, arena_ind);
+}
+TEST_END
+
+/*
+ * Query "arena.<i>.initialized" for `arena_ind`.  When `refresh` is true,
+ * bump the "epoch" mallctl first so cached stats are refreshed before the
+ * read.
+ */
+static bool
+arena_i_initialized(unsigned arena_ind, bool refresh) {
+	bool initialized;
+	size_t mib[3];
+	size_t miblen, sz;
+
+	if (refresh) {
+		uint64_t epoch = 1;
+		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+		    sizeof(epoch)), 0, "Unexpected mallctl() failure");
+	}
+
+	miblen = sizeof(mib)/sizeof(size_t);
+	assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+	    "Unexpected mallctlnametomib() failure");
+	mib[1] = (size_t)arena_ind;
+	sz = sizeof(initialized);
+	assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
+	    0), 0, "Unexpected mallctlbymib() failure");
+
+	return initialized;
+}
+
+/*
+ * Before any arena has been destroyed, the merged "destroyed" stats arena
+ * (MALLCTL_ARENAS_DESTROYED) must report uninitialized.
+ */
+TEST_BEGIN(test_arena_destroy_initial) {
+	assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+	    "Destroyed arena stats should not be initialized");
+}
+TEST_END
+
+/*
+ * Destroy an arena using the default extent hooks and verify: stats become
+ * initialized only after an epoch refresh, destruction moves stats to
+ * MALLCTL_ARENAS_DESTROYED, all allocations disappear, and the destroyed
+ * arena's index is recycled by the next arenas.create.
+ */
+TEST_BEGIN(test_arena_destroy_hooks_default) {
+	unsigned arena_ind, arena_ind_another, arena_ind_prev;
+	void **ptrs;
+	unsigned nptrs;
+
+	arena_ind = do_arena_create(NULL);
+	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+
+	assert_false(arena_i_initialized(arena_ind, false),
+	    "Arena stats should not be initialized");
+	assert_true(arena_i_initialized(arena_ind, true),
+	    "Arena stats should be initialized");
+
+	/*
+	 * Create another arena before destroying one, to better verify arena
+	 * index reuse.
+	 */
+	arena_ind_another = do_arena_create(NULL);
+
+	do_arena_destroy(arena_ind);
+
+	assert_false(arena_i_initialized(arena_ind, true),
+	    "Arena stats should not be initialized");
+	assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+	    "Destroyed arena stats should be initialized");
+
+	do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+	/* The freed index should be handed back by the next create. */
+	arena_ind_prev = arena_ind;
+	arena_ind = do_arena_create(NULL);
+	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+	assert_u_eq(arena_ind, arena_ind_prev,
+	    "Arena index should have been recycled");
+	do_arena_destroy(arena_ind);
+	do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+	do_arena_destroy(arena_ind_another);
+}
+TEST_END
+
+/*
+ * Actually unmap extents, regardless of opt_retain, so that attempts to access
+ * a destroyed arena's memory will segfault.
+ */
+static bool
+extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
+    bool committed, unsigned arena_ind) {
+	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
+	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
+	    "true" : "false", arena_ind);
+	assert_ptr_eq(extent_hooks, &hooks,
+	    "extent_hooks should be same as pointer used to set hooks");
+	assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
+	    "Wrong hook function");
+	called_dalloc = true;
+	/* Returning true tells jemalloc the hook declined to deallocate. */
+	if (!try_dalloc) {
+		return true;
+	}
+	did_dalloc = true;
+	if (!maps_coalesce && opt_retain) {
+		return true;
+	}
+	pages_unmap(addr, size);
+	return false;
+}
+
+/* Saved copy of the shared test hooks, restored after the unmap test. */
+static extent_hooks_t hooks_orig;
+
+/* Standard test hooks, except dalloc is replaced by the unmapping variant. */
+static extent_hooks_t hooks_unmap = {
+	extent_alloc_hook,
+	extent_dalloc_unmap, /* dalloc */
+	extent_destroy_hook,
+	extent_commit_hook,
+	extent_decommit_hook,
+	extent_purge_lazy_hook,
+	extent_purge_forced_hook,
+	extent_split_hook,
+	extent_merge_hook
+};
+
+/*
+ * Same destroy scenario as the default-hooks test, but with custom extent
+ * hooks whose dalloc genuinely unmaps memory; verifies that both the alloc
+ * and dalloc hooks actually fire across create/populate/destroy.
+ */
+TEST_BEGIN(test_arena_destroy_hooks_unmap) {
+	unsigned arena_ind;
+	void **ptrs;
+	unsigned nptrs;
+
+	extent_hooks_prep();
+	if (maps_coalesce) {
+		try_decommit = false;
+	}
+	/* Swap in the unmapping hooks; restored at the end of the test. */
+	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+	memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
+
+	did_alloc = false;
+	arena_ind = do_arena_create(&hooks);
+	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
+
+	assert_true(did_alloc, "Expected alloc");
+
+	assert_false(arena_i_initialized(arena_ind, false),
+	    "Arena stats should not be initialized");
+	assert_true(arena_i_initialized(arena_ind, true),
+	    "Arena stats should be initialized");
+
+	did_dalloc = false;
+	do_arena_destroy(arena_ind);
+	assert_true(did_dalloc, "Expected dalloc");
+
+	assert_false(arena_i_initialized(arena_ind, true),
+	    "Arena stats should not be initialized");
+	assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+	    "Destroyed arena stats should be initialized");
+
+	do_arena_reset_post(ptrs, nptrs, arena_ind);
+
+	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+/* Test driver: run all arena reset/destroy tests. */
+int
+main(void) {
+	return test(
+	    test_arena_reset,
+	    test_arena_destroy_initial,
+	    test_arena_destroy_hooks_default,
+	    test_arena_destroy_hooks_unmap);
+}
diff --git a/deps/jemalloc/test/unit/arena_reset_prof.c b/deps/jemalloc/test/unit/arena_reset_prof.c
new file mode 100644
index 0000000..38d8012
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset_prof.c
@@ -0,0 +1,4 @@
+#include "test/jemalloc_test.h"
+#define ARENA_RESET_PROF_C_
+
+#include "arena_reset.c"
diff --git a/deps/jemalloc/test/unit/arena_reset_prof.sh b/deps/jemalloc/test/unit/arena_reset_prof.sh
new file mode 100644
index 0000000..041dc1c
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_reset_prof.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="prof:true,lg_prof_sample:0"
diff --git a/deps/jemalloc/test/unit/atomic.c b/deps/jemalloc/test/unit/atomic.c
new file mode 100644
index 0000000..572d8d2
--- /dev/null
+++ b/deps/jemalloc/test/unit/atomic.c
@@ -0,0 +1,229 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for
+ * bool, etc. The one exception is that the short name for void * is "p" in
+ * some places and "ptr" in others. In the long run it would be nice to unify
+ * these, but in the short run we'll use this shim.
+ */
+#define assert_p_eq assert_ptr_eq
+
+/*
+ * t: the non-atomic type, like "uint32_t".
+ * ta: the short name for the type, like "u32".
+ * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected,
+ * and val3 for desired.
+ */
+
+#define DO_TESTS(t, ta, val1, val2, val3) do { \
+ t val; \
+ t expected; \
+ bool success; \
+ /* This (along with the load below) also tests ATOMIC_LOAD. */ \
+ atomic_##ta##_t atom = ATOMIC_INIT(val1); \
+ \
+ /* ATOMIC_INIT and load. */ \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, "Load or init failed"); \
+ \
+ /* Store. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val2, val, "Store failed"); \
+ \
+ /* Exchange. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val2, val, "Exchange store invalid value"); \
+ \
+ /* \
+ * Weak CAS. Spurious failures are allowed, so we loop a few \
+ * times. \
+ */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ success = false; \
+ for (int i = 0; i < 10 && !success; i++) { \
+ expected = val2; \
+ success = atomic_compare_exchange_weak_##ta(&atom, \
+ &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, expected, \
+ "CAS should update expected"); \
+ } \
+ assert_b_eq(val1 == val2, success, \
+ "Weak CAS did the wrong state update"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ if (success) { \
+ assert_##ta##_eq(val3, val, \
+ "Successful CAS should update atomic"); \
+ } else { \
+ assert_##ta##_eq(val1, val, \
+ "Unsuccessful CAS should not update atomic"); \
+ } \
+ \
+ /* Strong CAS. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ expected = val2; \
+ success = atomic_compare_exchange_strong_##ta(&atom, &expected, \
+ val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
+ assert_b_eq(val1 == val2, success, \
+ "Strong CAS did the wrong state update"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ if (success) { \
+ assert_##ta##_eq(val3, val, \
+ "Successful CAS should update atomic"); \
+ } else { \
+ assert_##ta##_eq(val1, val, \
+ "Unsuccessful CAS should not update atomic"); \
+ } \
+ \
+ \
+} while (0)
+
+#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \
+ atomic_##ta##_t atom; \
+ t val; \
+ \
+ /* Fetch-add. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, \
+ "Fetch-add should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1 + val2, val, \
+ "Fetch-add should update atomic"); \
+ \
+ /* Fetch-sub. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, \
+ "Fetch-sub should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1 - val2, val, \
+ "Fetch-sub should update atomic"); \
+ \
+ /* Fetch-and. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, \
+ "Fetch-and should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1 & val2, val, \
+ "Fetch-and should update atomic"); \
+ \
+ /* Fetch-or. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, \
+ "Fetch-or should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1 | val2, val, \
+ "Fetch-or should update atomic"); \
+ \
+ /* Fetch-xor. */ \
+ atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
+ val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1, val, \
+ "Fetch-xor should return previous value"); \
+ val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
+ assert_##ta##_eq(val1 ^ val2, val, \
+ "Fetch-xor should update atomic"); \
+} while (0)
+
+#define TEST_STRUCT(t, ta) \
+typedef struct { \
+ t val1; \
+ t val2; \
+ t val3; \
+} ta##_test_t;
+
+#define TEST_CASES(t) { \
+ {(t)-1, (t)-1, (t)-2}, \
+ {(t)-1, (t) 0, (t)-2}, \
+ {(t)-1, (t) 1, (t)-2}, \
+ \
+ {(t) 0, (t)-1, (t)-2}, \
+ {(t) 0, (t) 0, (t)-2}, \
+ {(t) 0, (t) 1, (t)-2}, \
+ \
+ {(t) 1, (t)-1, (t)-2}, \
+ {(t) 1, (t) 0, (t)-2}, \
+ {(t) 1, (t) 1, (t)-2}, \
+ \
+ {(t)0, (t)-(1 << 22), (t)-2}, \
+ {(t)0, (t)(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)(1 << 22), (t)-2} \
+}
+
+#define TEST_BODY(t, ta) do { \
+ const ta##_test_t tests[] = TEST_CASES(t); \
+ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
+ ta##_test_t test = tests[i]; \
+ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
+ } \
+} while (0)
+
+#define INTEGER_TEST_BODY(t, ta) do { \
+ const ta##_test_t tests[] = TEST_CASES(t); \
+ for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \
+ ta##_test_t test = tests[i]; \
+ DO_TESTS(t, ta, test.val1, test.val2, test.val3); \
+ DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \
+ } \
+} while (0)
+
+TEST_STRUCT(uint64_t, u64);
+TEST_BEGIN(test_atomic_u64) {
+#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+ test_skip("64-bit atomic operations not supported");
+#else
+ INTEGER_TEST_BODY(uint64_t, u64);
+#endif
+}
+TEST_END
+
+
+TEST_STRUCT(uint32_t, u32);
+TEST_BEGIN(test_atomic_u32) {
+ INTEGER_TEST_BODY(uint32_t, u32);
+}
+TEST_END
+
+TEST_STRUCT(void *, p);
+TEST_BEGIN(test_atomic_p) {
+ TEST_BODY(void *, p);
+}
+TEST_END
+
+TEST_STRUCT(size_t, zu);
+TEST_BEGIN(test_atomic_zu) {
+ INTEGER_TEST_BODY(size_t, zu);
+}
+TEST_END
+
+TEST_STRUCT(ssize_t, zd);
+TEST_BEGIN(test_atomic_zd) {
+ INTEGER_TEST_BODY(ssize_t, zd);
+}
+TEST_END
+
+
+TEST_STRUCT(unsigned, u);
+TEST_BEGIN(test_atomic_u) {
+ INTEGER_TEST_BODY(unsigned, u);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_atomic_u64,
+ test_atomic_u32,
+ test_atomic_p,
+ test_atomic_zu,
+ test_atomic_zd,
+ test_atomic_u);
+}
diff --git a/deps/jemalloc/test/unit/background_thread.c b/deps/jemalloc/test/unit/background_thread.c
new file mode 100644
index 0000000..f7bd37c
--- /dev/null
+++ b/deps/jemalloc/test/unit/background_thread.c
@@ -0,0 +1,119 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/util.h"
+
+static void
+test_switch_background_thread_ctl(bool new_val) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ e1 = new_val;
+ assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ &e1, sz), 0, "Unexpected mallctl() failure");
+ assert_b_eq(e0, !e1,
+ "background_thread should be %d before.\n", !e1);
+ if (e1) {
+ assert_zu_gt(n_background_threads, 0,
+ "Number of background threads should be non zero.\n");
+ } else {
+ assert_zu_eq(n_background_threads, 0,
+ "Number of background threads should be zero.\n");
+ }
+}
+
+static void
+test_repeat_background_thread_ctl(bool before) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ e1 = before;
+ assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ &e1, sz), 0, "Unexpected mallctl() failure");
+ assert_b_eq(e0, before,
+ "background_thread should be %d.\n", before);
+ if (e1) {
+ assert_zu_gt(n_background_threads, 0,
+ "Number of background threads should be non zero.\n");
+ } else {
+ assert_zu_eq(n_background_threads, 0,
+ "Number of background threads should be zero.\n");
+ }
+}
+
+TEST_BEGIN(test_background_thread_ctl) {
+ test_skip_if(!have_background_thread);
+
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+
+ assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("background_thread", (void *)&e1, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ assert_b_eq(e0, e1,
+ "Default and opt.background_thread does not match.\n");
+ if (e0) {
+ test_switch_background_thread_ctl(false);
+ }
+ assert_zu_eq(n_background_threads, 0,
+ "Number of background threads should be 0.\n");
+
+ for (unsigned i = 0; i < 4; i++) {
+ test_switch_background_thread_ctl(true);
+ test_repeat_background_thread_ctl(true);
+ test_repeat_background_thread_ctl(true);
+
+ test_switch_background_thread_ctl(false);
+ test_repeat_background_thread_ctl(false);
+ test_repeat_background_thread_ctl(false);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_background_thread_running) {
+ test_skip_if(!have_background_thread);
+ test_skip_if(!config_stats);
+
+#if defined(JEMALLOC_BACKGROUND_THREAD)
+ tsd_t *tsd = tsd_fetch();
+ background_thread_info_t *info = &background_thread_info[0];
+
+ test_repeat_background_thread_ctl(false);
+ test_switch_background_thread_ctl(true);
+ assert_b_eq(info->state, background_thread_started,
+ "Background_thread did not start.\n");
+
+ nstime_t start, now;
+ nstime_init(&start, 0);
+ nstime_update(&start);
+
+ bool ran = false;
+ while (true) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ if (info->tot_n_runs > 0) {
+ ran = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ if (ran) {
+ break;
+ }
+
+ nstime_init(&now, 0);
+ nstime_update(&now);
+ nstime_subtract(&now, &start);
+ assert_u64_lt(nstime_sec(&now), 1000,
+ "Background threads did not run for 1000 seconds.");
+ sleep(1);
+ }
+ test_switch_background_thread_ctl(false);
+#endif
+}
+TEST_END
+
+int
+main(void) {
+ /* Background_thread creation tests reentrancy naturally. */
+ return test_no_reentrancy(
+ test_background_thread_ctl,
+ test_background_thread_running);
+}
diff --git a/deps/jemalloc/test/unit/background_thread_enable.c b/deps/jemalloc/test/unit/background_thread_enable.c
new file mode 100644
index 0000000..d894e93
--- /dev/null
+++ b/deps/jemalloc/test/unit/background_thread_enable.c
@@ -0,0 +1,85 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20";
+
+TEST_BEGIN(test_deferred) {
+ test_skip_if(!have_background_thread);
+
+ unsigned id;
+ size_t sz_u = sizeof(unsigned);
+
+ /*
+ * 10 here is somewhat arbitrary, except insofar as we want to ensure
+ * that the number of background threads is smaller than the number of
+ * arenas. I'll ragequit long before we have to spin up 10 threads per
+ * cpu to handle background purging, so this is a conservative
+ * approximation.
+ */
+ for (unsigned i = 0; i < 10 * ncpus; i++) {
+ assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ "Failed to create arena");
+ }
+
+ bool enable = true;
+ size_t sz_b = sizeof(bool);
+ assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to enable background threads");
+ enable = false;
+ assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to disable background threads");
+}
+TEST_END
+
+TEST_BEGIN(test_max_background_threads) {
+ test_skip_if(!have_background_thread);
+
+ size_t max_n_thds;
+ size_t opt_max_n_thds;
+ size_t sz_m = sizeof(max_n_thds);
+ assert_d_eq(mallctl("opt.max_background_threads",
+ &opt_max_n_thds, &sz_m, NULL, 0), 0,
+ "Failed to get opt.max_background_threads");
+ assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
+ 0), 0, "Failed to get max background threads");
+ assert_zu_eq(opt_max_n_thds, max_n_thds,
+ "max_background_threads and "
+ "opt.max_background_threads should match");
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
+ sz_m), 0, "Failed to set max background threads");
+
+ unsigned id;
+ size_t sz_u = sizeof(unsigned);
+
+ for (unsigned i = 0; i < 10 * ncpus; i++) {
+ assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ "Failed to create arena");
+ }
+
+ bool enable = true;
+ size_t sz_b = sizeof(bool);
+ assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ "Failed to enable background threads");
+ assert_zu_eq(n_background_threads, max_n_thds,
+ "Number of background threads should not change.\n");
+ size_t new_max_thds = max_n_thds - 1;
+ if (new_max_thds > 0) {
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL,
+ &new_max_thds, sz_m), 0,
+ "Failed to set max background threads");
+ assert_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should decrease by 1.\n");
+ }
+ new_max_thds = 1;
+ assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
+ sz_m), 0, "Failed to set max background threads");
+ assert_zu_eq(n_background_threads, new_max_thds,
+ "Number of background threads should be 1.\n");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_deferred,
+ test_max_background_threads);
+}
diff --git a/deps/jemalloc/test/unit/base.c b/deps/jemalloc/test/unit/base.c
new file mode 100644
index 0000000..6b792cf
--- /dev/null
+++ b/deps/jemalloc/test/unit/base.c
@@ -0,0 +1,234 @@
+#include "test/jemalloc_test.h"
+
+#include "test/extent_hooks.h"
+
+static extent_hooks_t hooks_null = {
+ extent_alloc_hook,
+ NULL, /* dalloc */
+ NULL, /* destroy */
+ NULL, /* commit */
+ NULL, /* decommit */
+ NULL, /* purge_lazy */
+ NULL, /* purge_forced */
+ NULL, /* split */
+ NULL /* merge */
+};
+
+static extent_hooks_t hooks_not_null = {
+ extent_alloc_hook,
+ extent_dalloc_hook,
+ extent_destroy_hook,
+ NULL, /* commit */
+ extent_decommit_hook,
+ extent_purge_lazy_hook,
+ extent_purge_forced_hook,
+ NULL, /* split */
+ NULL /* merge */
+};
+
+TEST_BEGIN(test_base_hooks_default) {
+ base_t *base;
+ size_t allocated0, allocated1, resident, mapped, n_thp;
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
+ &n_thp);
+ assert_zu_ge(allocated0, sizeof(base_t),
+ "Base header should count as allocated");
+ if (opt_metadata_thp == metadata_thp_always) {
+ assert_zu_gt(n_thp, 0,
+ "Base should have 1 THP at least.");
+ }
+ }
+
+ assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ "Unexpected base_alloc() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
+ &n_thp);
+ assert_zu_ge(allocated1 - allocated0, 42,
+ "At least 42 bytes were allocated by base_alloc()");
+ }
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
+TEST_BEGIN(test_base_hooks_null) {
+ extent_hooks_t hooks_orig;
+ base_t *base;
+ size_t allocated0, allocated1, resident, mapped, n_thp;
+
+ extent_hooks_prep();
+ try_dalloc = false;
+ try_destroy = true;
+ try_decommit = false;
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+ memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks);
+ assert_ptr_not_null(base, "Unexpected base_new() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
+ &n_thp);
+ assert_zu_ge(allocated0, sizeof(base_t),
+ "Base header should count as allocated");
+ if (opt_metadata_thp == metadata_thp_always) {
+ assert_zu_gt(n_thp, 0,
+ "Base should have 1 THP at least.");
+ }
+ }
+
+ assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ "Unexpected base_alloc() failure");
+
+ if (config_stats) {
+ base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
+ &n_thp);
+ assert_zu_ge(allocated1 - allocated0, 42,
+ "At least 42 bytes were allocated by base_alloc()");
+ }
+
+ base_delete(tsdn, base);
+
+ memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+TEST_BEGIN(test_base_hooks_not_null) {
+ extent_hooks_t hooks_orig;
+ base_t *base;
+ void *p, *q, *r, *r_exp;
+
+ extent_hooks_prep();
+ try_dalloc = false;
+ try_destroy = true;
+ try_decommit = false;
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ did_alloc = false;
+ base = base_new(tsdn, 0, &hooks);
+ assert_ptr_not_null(base, "Unexpected base_new() failure");
+ assert_true(did_alloc, "Expected alloc");
+
+ /*
+ * Check for tight packing at specified alignment under simple
+ * conditions.
+ */
+ {
+ const size_t alignments[] = {
+ 1,
+ QUANTUM,
+ QUANTUM << 1,
+ CACHELINE,
+ CACHELINE << 1,
+ };
+ unsigned i;
+
+ for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
+ size_t alignment = alignments[i];
+ size_t align_ceil = ALIGNMENT_CEILING(alignment,
+ QUANTUM);
+ p = base_alloc(tsdn, base, 1, alignment);
+ assert_ptr_not_null(p,
+ "Unexpected base_alloc() failure");
+ assert_ptr_eq(p,
+ (void *)(ALIGNMENT_CEILING((uintptr_t)p,
+ alignment)), "Expected quantum alignment");
+ q = base_alloc(tsdn, base, alignment, alignment);
+ assert_ptr_not_null(q,
+ "Unexpected base_alloc() failure");
+ assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
+ "Minimal allocation should take up %zu bytes",
+ align_ceil);
+ r = base_alloc(tsdn, base, 1, alignment);
+ assert_ptr_not_null(r,
+ "Unexpected base_alloc() failure");
+ assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
+ "Minimal allocation should take up %zu bytes",
+ align_ceil);
+ }
+ }
+
+ /*
+ * Allocate an object that cannot fit in the first block, then verify
+ * that the first block's remaining space is considered for subsequent
+ * allocation.
+ */
+ assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
+ "Remainder insufficient for test");
+ /* Use up all but one quantum of block. */
+ while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
+ p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
+ assert_ptr_not_null(p, "Unexpected base_alloc() failure");
+ }
+ r_exp = extent_addr_get(&base->blocks->extent);
+ assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
+ q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
+ assert_ptr_not_null(q, "Unexpected base_alloc() failure");
+ assert_ptr_ne(q, r_exp, "Expected allocation from new block");
+ assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+ r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
+ assert_ptr_not_null(r, "Unexpected base_alloc() failure");
+ assert_ptr_eq(r, r_exp, "Expected allocation from first block");
+ assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+
+ /*
+ * Check for proper alignment support when normal blocks are too small.
+ */
+ {
+ const size_t alignments[] = {
+ HUGEPAGE,
+ HUGEPAGE << 1
+ };
+ unsigned i;
+
+ for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
+ size_t alignment = alignments[i];
+ p = base_alloc(tsdn, base, QUANTUM, alignment);
+ assert_ptr_not_null(p,
+ "Unexpected base_alloc() failure");
+ assert_ptr_eq(p,
+ (void *)(ALIGNMENT_CEILING((uintptr_t)p,
+ alignment)), "Expected %zu-byte alignment",
+ alignment);
+ }
+ }
+
+ called_dalloc = called_destroy = called_decommit = called_purge_lazy =
+ called_purge_forced = false;
+ base_delete(tsdn, base);
+ assert_true(called_dalloc, "Expected dalloc call");
+ assert_true(!called_destroy, "Unexpected destroy call");
+ assert_true(called_decommit, "Expected decommit call");
+ assert_true(called_purge_lazy, "Expected purge_lazy call");
+ assert_true(called_purge_forced, "Expected purge_forced call");
+
+ try_dalloc = true;
+ try_destroy = true;
+ try_decommit = true;
+ try_purge_lazy = true;
+ try_purge_forced = true;
+ memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_base_hooks_default,
+ test_base_hooks_null,
+ test_base_hooks_not_null);
+}
diff --git a/deps/jemalloc/test/unit/binshard.c b/deps/jemalloc/test/unit/binshard.c
new file mode 100644
index 0000000..d7a8df8
--- /dev/null
+++ b/deps/jemalloc/test/unit/binshard.c
@@ -0,0 +1,154 @@
+#include "test/jemalloc_test.h"
+
+/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */
+
+#define NTHREADS 16
+#define REMOTE_NALLOC 256
+
+static void *
+thd_producer(void *varg) {
+ void **mem = varg;
+ unsigned arena, i;
+ size_t sz;
+
+ sz = sizeof(arena);
+ /* Remote arena. */
+ assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ for (i = 0; i < REMOTE_NALLOC / 2; i++) {
+ mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
+ }
+
+ /* Remote bin. */
+ for (; i < REMOTE_NALLOC; i++) {
+ mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0));
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_producer_consumer) {
+ thd_t thds[NTHREADS];
+ void *mem[NTHREADS][REMOTE_NALLOC];
+ unsigned i;
+
+ /* Create producer threads to allocate. */
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_producer, mem[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+ /* Remote deallocation by the current thread. */
+ for (i = 0; i < NTHREADS; i++) {
+ for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
+ assert_ptr_not_null(mem[i][j],
+ "Unexpected remote allocation failure");
+ dallocx(mem[i][j], 0);
+ }
+ }
+}
+TEST_END
+
+static void *
+thd_start(void *varg) {
+ void *ptr, *ptr2;
+ extent_t *extent;
+ unsigned shard1, shard2;
+
+ tsdn_t *tsdn = tsdn_fetch();
+ /* Try triggering allocations from sharded bins. */
+ for (unsigned i = 0; i < 1024; i++) {
+ ptr = mallocx(1, MALLOCX_TCACHE_NONE);
+ ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
+
+ extent = iealloc(tsdn, ptr);
+ shard1 = extent_binshard_get(extent);
+ dallocx(ptr, 0);
+ assert_u_lt(shard1, 16, "Unexpected bin shard used");
+
+ extent = iealloc(tsdn, ptr2);
+ shard2 = extent_binshard_get(extent);
+ dallocx(ptr2, 0);
+ assert_u_lt(shard2, 4, "Unexpected bin shard used");
+
+ if (shard1 > 0 || shard2 > 0) {
+ /* Triggered sharded bin usage. */
+ return (void *)(uintptr_t)shard1;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_bin_shard_mt) {
+ test_skip_if(have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena));
+
+ thd_t thds[NTHREADS];
+ unsigned i;
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start, NULL);
+ }
+ bool sharded = false;
+ for (i = 0; i < NTHREADS; i++) {
+ void *ret;
+ thd_join(thds[i], &ret);
+ if (ret != NULL) {
+ sharded = true;
+ }
+ }
+ assert_b_eq(sharded, true, "Did not find sharded bins");
+}
+TEST_END
+
+TEST_BEGIN(test_bin_shard) {
+ unsigned nbins, i;
+ size_t mib[4], mib2[4];
+ size_t miblen, miblen2, len;
+
+ len = sizeof(nbins);
+ assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ miblen2 = 4;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ for (i = 0; i < nbins; i++) {
+ uint32_t nshards;
+ size_t size, sz1, sz2;
+
+ mib[2] = i;
+ sz1 = sizeof(nshards);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+
+ mib2[2] = i;
+ sz2 = sizeof(size);
+ assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+
+ if (size >= 1 && size <= 128) {
+ assert_u_eq(nshards, 16, "Unexpected nshards");
+ } else if (size == 256) {
+ assert_u_eq(nshards, 8, "Unexpected nshards");
+ } else if (size > 128 && size <= 512) {
+ assert_u_eq(nshards, 4, "Unexpected nshards");
+ } else {
+ assert_u_eq(nshards, 1, "Unexpected nshards");
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_bin_shard,
+ test_bin_shard_mt,
+ test_producer_consumer);
+}
diff --git a/deps/jemalloc/test/unit/binshard.sh b/deps/jemalloc/test/unit/binshard.sh
new file mode 100644
index 0000000..c1d58c8
--- /dev/null
+++ b/deps/jemalloc/test/unit/binshard.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8"
diff --git a/deps/jemalloc/test/unit/bit_util.c b/deps/jemalloc/test/unit/bit_util.c
new file mode 100644
index 0000000..b747deb
--- /dev/null
+++ b/deps/jemalloc/test/unit/bit_util.c
@@ -0,0 +1,111 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/bit_util.h"
+
+#define TEST_POW2_CEIL(t, suf, pri) do { \
+ unsigned i, pow2; \
+ t x; \
+ \
+ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ \
+ for (i = 0; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+ << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 2; i < sizeof(t) * 8; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+ ((t)1) << i, "Unexpected result"); \
+ } \
+ \
+ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
+ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+ ((t)1) << (i+1), "Unexpected result"); \
+ } \
+ \
+ for (pow2 = 1; pow2 < 25; pow2++) { \
+ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
+ x++) { \
+ assert_##suf##_eq(pow2_ceil_##suf(x), \
+ ((t)1) << pow2, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_pow2_ceil_u64) {
+ TEST_POW2_CEIL(uint64_t, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_u32) {
+ TEST_POW2_CEIL(uint32_t, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_pow2_ceil_zu) {
+ TEST_POW2_CEIL(size_t, zu, "zu");
+}
+TEST_END
+
+void
+assert_lg_ceil_range(size_t input, unsigned answer) {
+ if (input == 1) {
+ assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
+ return;
+ }
+ assert_zu_le(input, (ZU(1) << answer),
+ "Got %u as lg_ceil of %zu", answer, input);
+ assert_zu_gt(input, (ZU(1) << (answer - 1)),
+ "Got %u as lg_ceil of %zu", answer, input);
+}
+
+void
+assert_lg_floor_range(size_t input, unsigned answer) {
+ if (input == 1) {
+ assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
+ return;
+ }
+ assert_zu_ge(input, (ZU(1) << answer),
+ "Got %u as lg_floor of %zu", answer, input);
+ assert_zu_lt(input, (ZU(1) << (answer + 1)),
+ "Got %u as lg_floor of %zu", answer, input);
+}
+
+TEST_BEGIN(test_lg_ceil_floor) {
+ for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
+ assert_lg_ceil_range(i, lg_ceil(i));
+ assert_lg_ceil_range(i, LG_CEIL(i));
+ assert_lg_floor_range(i, lg_floor(i));
+ assert_lg_floor_range(i, LG_FLOOR(i));
+ }
+ for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
+ for (size_t j = 0; j < (1 << 4); j++) {
+ size_t num1 = ((size_t)1 << i)
+ - j * ((size_t)1 << (i - 4));
+ size_t num2 = ((size_t)1 << i)
+ + j * ((size_t)1 << (i - 4));
+ assert_zu_ne(num1, 0, "Invalid lg argument");
+ assert_zu_ne(num2, 0, "Invalid lg argument");
+ assert_lg_ceil_range(num1, lg_ceil(num1));
+ assert_lg_ceil_range(num1, LG_CEIL(num1));
+ assert_lg_ceil_range(num2, lg_ceil(num2));
+ assert_lg_ceil_range(num2, LG_CEIL(num2));
+
+ assert_lg_floor_range(num1, lg_floor(num1));
+ assert_lg_floor_range(num1, LG_FLOOR(num1));
+ assert_lg_floor_range(num2, lg_floor(num2));
+ assert_lg_floor_range(num2, LG_FLOOR(num2));
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_pow2_ceil_u64,
+ test_pow2_ceil_u32,
+ test_pow2_ceil_zu,
+ test_lg_ceil_floor);
+}
diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
new file mode 100644
index 0000000..cafb203
--- /dev/null
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -0,0 +1,431 @@
+#include "test/jemalloc_test.h"
+
+#define NBITS_TAB \
+ NB( 1) \
+ NB( 2) \
+ NB( 3) \
+ NB( 4) \
+ NB( 5) \
+ NB( 6) \
+ NB( 7) \
+ NB( 8) \
+ NB( 9) \
+ NB(10) \
+ NB(11) \
+ NB(12) \
+ NB(13) \
+ NB(14) \
+ NB(15) \
+ NB(16) \
+ NB(17) \
+ NB(18) \
+ NB(19) \
+ NB(20) \
+ NB(21) \
+ NB(22) \
+ NB(23) \
+ NB(24) \
+ NB(25) \
+ NB(26) \
+ NB(27) \
+ NB(28) \
+ NB(29) \
+ NB(30) \
+ NB(31) \
+ NB(32) \
+ \
+ NB(33) \
+ NB(34) \
+ NB(35) \
+ NB(36) \
+ NB(37) \
+ NB(38) \
+ NB(39) \
+ NB(40) \
+ NB(41) \
+ NB(42) \
+ NB(43) \
+ NB(44) \
+ NB(45) \
+ NB(46) \
+ NB(47) \
+ NB(48) \
+ NB(49) \
+ NB(50) \
+ NB(51) \
+ NB(52) \
+ NB(53) \
+ NB(54) \
+ NB(55) \
+ NB(56) \
+ NB(57) \
+ NB(58) \
+ NB(59) \
+ NB(60) \
+ NB(61) \
+ NB(62) \
+ NB(63) \
+ NB(64) \
+ NB(65) \
+ \
+ NB(126) \
+ NB(127) \
+ NB(128) \
+ NB(129) \
+ NB(130) \
+ \
+ NB(254) \
+ NB(255) \
+ NB(256) \
+ NB(257) \
+ NB(258) \
+ \
+ NB(510) \
+ NB(511) \
+ NB(512) \
+ NB(513) \
+ NB(514) \
+ \
+ NB(1024) \
+ NB(2048) \
+ NB(4096) \
+ NB(8192) \
+ NB(16384) \
+
+static void
+test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
+ bitmap_info_t binfo_dyn;
+ bitmap_info_init(&binfo_dyn, nbits);
+
+ assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+ assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+#ifdef BITMAP_USE_TREE
+ assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
+ "Unexpected difference between static and dynamic initialization, "
+ "nbits=%zu", nbits);
+ {
+ unsigned i;
+
+ for (i = 0; i < binfo->nlevels; i++) {
+ assert_zu_eq(binfo->levels[i].group_offset,
+ binfo_dyn.levels[i].group_offset,
+ "Unexpected difference between static and dynamic "
+ "initialization, nbits=%zu, level=%u", nbits, i);
+ }
+ }
+#else
+ assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
+ "Unexpected difference between static and dynamic initialization");
+#endif
+}
+
+TEST_BEGIN(test_bitmap_initializer) {
+#define NB(nbits) { \
+ if (nbits <= BITMAP_MAXBITS) { \
+ bitmap_info_t binfo = \
+ BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_initializer_body(&binfo, nbits); \
+ } \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static size_t
+test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
+ size_t prev_size) {
+ size_t size = bitmap_size(binfo);
+ assert_zu_ge(size, (nbits >> 3),
+ "Bitmap size is smaller than expected");
+ assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
+ return size;
+}
+
+TEST_BEGIN(test_bitmap_size) {
+ size_t nbits, prev_size;
+
+ prev_size = 0;
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ prev_size = test_bitmap_size_body(&binfo, nbits, prev_size);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ prev_size = test_bitmap_size_body(&binfo, nbits, \
+ prev_size); \
+ }
+ prev_size = 0;
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+
+ bitmap_init(bitmap, binfo, false);
+ for (i = 0; i < nbits; i++) {
+ assert_false(bitmap_get(bitmap, binfo, i),
+ "Bit should be unset");
+ }
+
+ bitmap_init(bitmap, binfo, true);
+ for (i = 0; i < nbits; i++) {
+ assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
+ }
+
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_init) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_init_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_init_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_set) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_set_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_set_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
+ size_t i;
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ for (i = 0; i < nbits; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ }
+ for (i = 0; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i);
+ }
+ assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_unset) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_unset_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_unset_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
+ bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
+ assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ bitmap_init(bitmap, binfo, false);
+
+ /* Iteratively set bits starting at the beginning. */
+ for (size_t i = 0; i < nbits; i++) {
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "First unset bit should be just after previous first unset "
+ "bit");
+ }
+ assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+
+ /*
+ * Iteratively unset bits starting at the end, and verify that
+ * bitmap_sfu() reaches the unset bits.
+ */
+ for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
+ bitmap_unset(bitmap, binfo, i);
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ "First unset bit should the bit previously unset");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ "First unset bit should the bit previously unset");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "First unset bit should the bit previously unset");
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "First unset bit should the bit previously unset");
+ bitmap_unset(bitmap, binfo, i);
+ }
+ assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
+
+ /*
+ * Iteratively set bits starting at the beginning, and verify that
+ * bitmap_sfu() looks past them.
+ */
+ for (size_t i = 1; i < nbits; i++) {
+ bitmap_set(bitmap, binfo, i - 1);
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "First unset bit should be just after the bit previously "
+ "set");
+ bitmap_unset(bitmap, binfo, i);
+ }
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
+ "First unset bit should be the last bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
+ nbits - 1, "First unset bit should be the last bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
+ "First unset bit should be the last bit");
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
+ "First unset bit should be the last bit");
+ assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+
+ /*
+ * Bubble a "usu" pattern through the bitmap and verify that
+ * bitmap_ffu() finds the correct bit for all five min_bit cases.
+ */
+ if (nbits >= 3) {
+ for (size_t i = 0; i < nbits-2; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ bitmap_unset(bitmap, binfo, i+2);
+ if (i > 0) {
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ "Unexpected first unset bit");
+ }
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "Unexpected first unset bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
+ "Unexpected first unset bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
+ "Unexpected first unset bit");
+ if (i + 3 < nbits) {
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
+ nbits, "Unexpected first unset bit");
+ }
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "Unexpected first unset bit");
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
+ "Unexpected first unset bit");
+ }
+ }
+
+ /*
+ * Unset the last bit, bubble another unset bit through the bitmap, and
+ * verify that bitmap_ffu() finds the correct bit for all four min_bit
+ * cases.
+ */
+ if (nbits >= 3) {
+ bitmap_unset(bitmap, binfo, nbits-1);
+ for (size_t i = 0; i < nbits-1; i++) {
+ bitmap_unset(bitmap, binfo, i);
+ if (i > 0) {
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ "Unexpected first unset bit");
+ }
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ "Unexpected first unset bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
+ "Unexpected first unset bit");
+ assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
+ nbits-1, "Unexpected first unset bit");
+
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ "Unexpected first unset bit");
+ }
+ assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
+ "Unexpected first unset bit");
+ }
+
+ free(bitmap);
+}
+
+TEST_BEGIN(test_bitmap_xfu) {
+ size_t nbits;
+
+ for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ bitmap_info_t binfo;
+ bitmap_info_init(&binfo, nbits);
+ test_bitmap_xfu_body(&binfo, nbits);
+ }
+#define NB(nbits) { \
+ bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \
+ test_bitmap_xfu_body(&binfo, nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_bitmap_initializer,
+ test_bitmap_size,
+ test_bitmap_init,
+ test_bitmap_set,
+ test_bitmap_unset,
+ test_bitmap_xfu);
+}
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
new file mode 100644
index 0000000..707ea5f
--- /dev/null
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -0,0 +1,211 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_new_delete) {
+ tsd_t *tsd;
+ ckh_t ckh;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+
+ assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_count_insert_search_remove) {
+ tsd_t *tsd;
+ ckh_t ckh;
+ const char *strs[] = {
+ "a string",
+ "A string",
+ "a string.",
+ "A string."
+ };
+ const char *missing = "A string not in the hash table.";
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ ckh_string_keycomp), "Unexpected ckh_new() error");
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu", ZU(0),
+ ckh_count(&ckh));
+
+ /* Insert. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ ckh_insert(tsd, &ckh, strs[i], strs[i]);
+ assert_zu_eq(ckh_count(&ckh), i+1,
+ "ckh_count() should return %zu, but it returned %zu", i+1,
+ ckh_count(&ckh));
+ }
+
+ /* Search. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_search(&ckh, strs[i], kp, vp),
+ "Unexpected ckh_search() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ }
+ assert_true(ckh_search(&ckh, missing, NULL, NULL),
+ "Unexpected ckh_search() success");
+
+ /* Remove. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
+ "Unexpected ckh_remove() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
+ assert_zu_eq(ckh_count(&ckh),
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ "ckh_count() should return %zu, but it returned %zu",
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ ckh_count(&ckh));
+ }
+
+ ckh_delete(tsd, &ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_insert_iter_remove) {
+#define NITEMS ZU(1000)
+ tsd_t *tsd;
+ ckh_t ckh;
+ void **p[NITEMS];
+ void *q, *r;
+ size_t i;
+
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+
+ for (i = 0; i < NITEMS; i++) {
+ p[i] = mallocx(i+1, 0);
+ assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ size_t j;
+
+ for (j = i; j < NITEMS; j++) {
+ assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
+ "Unexpected ckh_insert() failure");
+ assert_false(ckh_search(&ckh, p[j], &q, &r),
+ "Unexpected ckh_search() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ }
+
+ assert_zu_eq(ckh_count(&ckh), NITEMS,
+ "ckh_count() should return %zu, but it returned %zu",
+ NITEMS, ckh_count(&ckh));
+
+ for (j = i + 1; j < NITEMS; j++) {
+ assert_false(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() success");
+ }
+
+ {
+ bool seen[NITEMS];
+ size_t tabind;
+
+ memset(seen, 0, sizeof(seen));
+
+ for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
+ size_t k;
+
+ assert_ptr_eq(q, r, "Key and val not equal");
+
+ for (k = 0; k < NITEMS; k++) {
+ if (p[k] == q) {
+ assert_false(seen[k],
+ "Item %zu already seen", k);
+ seen[k] = true;
+ break;
+ }
+ }
+ }
+
+ for (j = 0; j < i + 1; j++) {
+ assert_true(seen[j], "Item %zu not seen", j);
+ }
+ for (; j < NITEMS; j++) {
+ assert_false(seen[j], "Item %zu seen", j);
+ }
+ }
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ assert_false(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[i], q, "Key pointer mismatch");
+ assert_ptr_eq(p[i], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() success");
+ dallocx(p[i], 0);
+ }
+
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu",
+ ZU(0), ckh_count(&ckh));
+ ckh_delete(tsd, &ckh);
+#undef NITEMS
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_new_delete,
+ test_count_insert_search_remove,
+ test_insert_iter_remove);
+}
diff --git a/deps/jemalloc/test/unit/decay.c b/deps/jemalloc/test/unit/decay.c
new file mode 100644
index 0000000..cf3c079
--- /dev/null
+++ b/deps/jemalloc/test/unit/decay.c
@@ -0,0 +1,605 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ticker.h"
+
+static nstime_monotonic_t *nstime_monotonic_orig;
+static nstime_update_t *nstime_update_orig;
+
+static unsigned nupdates_mock;
+static nstime_t time_mock;
+static bool monotonic_mock;
+
+static bool
+check_background_thread_enabled(void) {
+ bool enabled;
+ size_t sz = sizeof(bool);
+ int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);
+ if (ret == ENOENT) {
+ return false;
+ }
+ assert_d_eq(ret, 0, "Unexpected mallctl error");
+ return enabled;
+}
+
+static bool
+nstime_monotonic_mock(void) {
+ return monotonic_mock;
+}
+
+static bool
+nstime_update_mock(nstime_t *time) {
+ nupdates_mock++;
+ if (monotonic_mock) {
+ nstime_copy(time, &time_mock);
+ }
+ return !monotonic_mock;
+}
+
+static unsigned
+do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
+ "Unexpected mallctlbymib() failure");
+
+ assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
+ "Unexpected mallctlbymib() failure");
+
+ return arena_ind;
+}
+
+static void
+do_arena_destroy(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+void
+do_epoch(void) {
+ uint64_t epoch = 1;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+}
+
+void
+do_purge(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+void
+do_decay(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+static uint64_t
+get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ uint64_t npurge = 0;
+ size_t sz = sizeof(npurge);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
+ config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
+ return npurge;
+}
+
+static uint64_t
+get_arena_dirty_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
+}
+
+static uint64_t
+get_arena_dirty_purged(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
+}
+
+static uint64_t
+get_arena_muzzy_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
+}
+
+static uint64_t
+get_arena_npurge(unsigned arena_ind) {
+ do_epoch();
+ return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
+ get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
+}
+
+static size_t
+get_arena_pdirty(unsigned arena_ind) {
+ do_epoch();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ size_t pdirty;
+ size_t sz = sizeof(pdirty);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+ return pdirty;
+}
+
+static size_t
+get_arena_pmuzzy(unsigned arena_ind) {
+ do_epoch();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[2] = (size_t)arena_ind;
+ size_t pmuzzy;
+ size_t sz = sizeof(pmuzzy);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+ return pmuzzy;
+}
+
+static void *
+do_mallocx(size_t size, int flags) {
+ void *p = mallocx(size, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ return p;
+}
+
+static void
+generate_dirty(unsigned arena_ind, size_t size) {
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+ void *p = do_mallocx(size, flags);
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_decay_ticks) {
+ test_skip_if(check_background_thread_enabled());
+
+ ticker_t *decay_ticker;
+ unsigned tick0, tick1, arena_ind;
+ size_t sz, large0;
+ void *p;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ /* Set up a manually managed arena for test. */
+ arena_ind = do_arena_create(0, 0);
+
+ /* Migrate to the new arena, and get the ticker. */
+ unsigned old_arena_ind;
+ size_t sz_arena_ind = sizeof(old_arena_ind);
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
+ &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+ decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
+ assert_ptr_not_null(decay_ticker,
+ "Unexpected failure getting decay ticker");
+
+ /*
+ * Test the standard APIs using a large size class, since we can't
+ * control tcache interactions for small size classes (except by
+ * completely disabling tcache for the entire test program).
+ */
+
+ /* malloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = malloc(large0);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+ /* free(). */
+ tick0 = ticker_read(decay_ticker);
+ free(p);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+ /* calloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = calloc(1, large0);
+ assert_ptr_not_null(p, "Unexpected calloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+ free(p);
+
+ /* posix_memalign(). */
+ tick0 = ticker_read(decay_ticker);
+ assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
+ "Unexpected posix_memalign() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during posix_memalign()");
+ free(p);
+
+ /* aligned_alloc(). */
+ tick0 = ticker_read(decay_ticker);
+ p = aligned_alloc(sizeof(size_t), large0);
+ assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during aligned_alloc()");
+ free(p);
+
+ /* realloc(). */
+ /* Allocate. */
+ tick0 = ticker_read(decay_ticker);
+ p = realloc(NULL, large0);
+ assert_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Reallocate. */
+ tick0 = ticker_read(decay_ticker);
+ p = realloc(p, large0);
+ assert_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Deallocate. */
+ tick0 = ticker_read(decay_ticker);
+ realloc(p, 0);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+ /*
+ * Test the *allocx() APIs using large and small size classes, with
+ * tcache explicitly disabled.
+ */
+ {
+ unsigned i;
+ size_t allocx_sizes[2];
+ allocx_sizes[0] = large0;
+ allocx_sizes[1] = 1;
+
+ for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
+ sz = allocx_sizes[i];
+
+ /* mallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during mallocx() (sz=%zu)",
+ sz);
+ /* rallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected rallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during rallocx() (sz=%zu)",
+ sz);
+ /* xallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during xallocx() (sz=%zu)",
+ sz);
+ /* dallocx(). */
+ tick0 = ticker_read(decay_ticker);
+ dallocx(p, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during dallocx() (sz=%zu)",
+ sz);
+ /* sdallocx(). */
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick0 = ticker_read(decay_ticker);
+ sdallocx(p, sz, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during sdallocx() "
+ "(sz=%zu)", sz);
+ }
+ }
+
+ /*
+ * Test tcache fill/flush interactions for large and small size classes,
+ * using an explicit tcache.
+ */
+ unsigned tcache_ind, i;
+ size_t tcache_sizes[2];
+ tcache_sizes[0] = large0;
+ tcache_sizes[1] = 1;
+
+ size_t tcache_max, sz_tcache_max;
+ sz_tcache_max = sizeof(tcache_max);
+ assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+ &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
+ sz = tcache_sizes[i];
+
+ /* tcache fill. */
+ tick0 = ticker_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_read(decay_ticker);
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache fill "
+ "(sz=%zu)", sz);
+ /* tcache flush. */
+ dallocx(p, MALLOCX_TCACHE(tcache_ind));
+ tick0 = ticker_read(decay_ticker);
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL,
+ (void *)&tcache_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl failure");
+ tick1 = ticker_read(decay_ticker);
+
+ /* Will only tick if it's in tcache. */
+ if (sz <= tcache_max) {
+ assert_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache "
+ "flush (sz=%zu)", sz);
+ } else {
+ assert_u32_eq(tick1, tick0,
+ "Unexpected ticker tick during tcache "
+ "flush (sz=%zu)", sz);
+ }
+ }
+}
+TEST_END
+
+static void
+decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
+ uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
+#define NINTERVALS 101
+ nstime_t time, update_interval, decay_ms, deadline;
+
+ nstime_init(&time, 0);
+ nstime_update(&time);
+
+ nstime_init2(&decay_ms, dt, 0);
+ nstime_copy(&deadline, &time);
+ nstime_add(&deadline, &decay_ms);
+
+ nstime_init2(&update_interval, dt, 0);
+ nstime_idivide(&update_interval, NINTERVALS);
+
+ /*
+ * Keep q's slab from being deallocated during the looping below. If a
+ * cached slab were to repeatedly come and go during looping, it could
+ * prevent the decay backlog ever becoming empty.
+ */
+ void *p = do_mallocx(1, flags);
+ uint64_t dirty_npurge1, muzzy_npurge1;
+ do {
+ for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
+ i++) {
+ void *q = do_mallocx(1, flags);
+ dallocx(q, flags);
+ }
+ dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
+ muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
+
+ nstime_add(&time_mock, &update_interval);
+ nstime_update(&time);
+ } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
+ dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
+ !terminate_asap));
+ dallocx(p, flags);
+
+ if (config_stats) {
+ assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
+ muzzy_npurge0, "Expected purging to occur");
+ }
+#undef NINTERVALS
+}
+
+TEST_BEGIN(test_decay_ticker) {
+ test_skip_if(check_background_thread_enabled());
+#define NPS 2048
+ ssize_t ddt = opt_dirty_decay_ms;
+ ssize_t mdt = opt_muzzy_decay_ms;
+ unsigned arena_ind = do_arena_create(ddt, mdt);
+ int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ size_t large;
+
+ /*
+ * Allocate a bunch of large objects, pause the clock, deallocate every
+ * other object (to fragment virtual memory), restore the clock, then
+ * [md]allocx() in a tight loop while advancing time rapidly to verify
+ * the ticker triggers purging.
+ */
+
+ size_t tcache_max;
+ size_t sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ large = nallocx(tcache_max + 1, flags);
+
+ do_purge(arena_ind);
+ uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
+ uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
+
+ for (unsigned i = 0; i < NPS; i++) {
+ ps[i] = do_mallocx(large, flags);
+ }
+
+ nupdates_mock = 0;
+ nstime_init(&time_mock, 0);
+ nstime_update(&time_mock);
+ monotonic_mock = true;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (unsigned i = 0; i < NPS; i += 2) {
+ dallocx(ps[i], flags);
+ unsigned nupdates0 = nupdates_mock;
+ do_decay(arena_ind);
+ assert_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
+ muzzy_npurge0, true);
+ decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
+ muzzy_npurge0, false);
+
+ do_arena_destroy(arena_ind);
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_nonmonotonic) {
+ test_skip_if(check_background_thread_enabled());
+#define NPS (SMOOTHSTEP_NSTEPS + 1)
+ int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ uint64_t npurge0 = 0;
+ uint64_t npurge1 = 0;
+ size_t sz, large0;
+ unsigned i, nupdates0;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge0 = get_arena_npurge(0);
+
+ nupdates_mock = 0;
+ nstime_init(&time_mock, 0);
+ nstime_update(&time_mock);
+ monotonic_mock = false;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (i = 0; i < NPS; i++) {
+ ps[i] = mallocx(large0, flags);
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NPS; i++) {
+ dallocx(ps[i], flags);
+ nupdates0 = nupdates_mock;
+ assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.decay failure");
+ assert_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge1 = get_arena_npurge(0);
+
+ if (config_stats) {
+ assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
+ }
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_now) {
+ test_skip_if(check_background_thread_enabled());
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ /* Verify that dirty/muzzy pages never linger after deallocation. */
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ size_t size = sizes[i];
+ generate_dirty(arena_ind, size);
+ assert_zu_eq(get_arena_pdirty(arena_ind), 0,
+ "Unexpected dirty pages");
+ assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
+ "Unexpected muzzy pages");
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+TEST_BEGIN(test_decay_never) {
+ test_skip_if(check_background_thread_enabled() || !config_stats);
+
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+ assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ void *ptrs[sizeof(sizes)/sizeof(size_t)];
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ ptrs[i] = do_mallocx(sizes[i], flags);
+ }
+ /* Verify that each deallocation generates additional dirty pages. */
+ size_t pdirty_prev = get_arena_pdirty(arena_ind);
+ size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
+ assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
+ assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ dallocx(ptrs[i], flags);
+ size_t pdirty = get_arena_pdirty(arena_ind);
+ size_t pmuzzy = get_arena_pmuzzy(arena_ind);
+ assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
+ pdirty_prev, "Expected dirty pages to increase.");
+ assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
+ pdirty_prev = pdirty;
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_decay_ticks,
+ test_decay_ticker,
+ test_decay_nonmonotonic,
+ test_decay_now,
+ test_decay_never);
+}
diff --git a/deps/jemalloc/test/unit/decay.sh b/deps/jemalloc/test/unit/decay.sh
new file mode 100644
index 0000000..45aeccf
--- /dev/null
+++ b/deps/jemalloc/test/unit/decay.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0"
diff --git a/deps/jemalloc/test/unit/div.c b/deps/jemalloc/test/unit/div.c
new file mode 100644
index 0000000..b47f10b
--- /dev/null
+++ b/deps/jemalloc/test/unit/div.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/div.h"
+
+TEST_BEGIN(test_div_exhaustive) {
+ /*
+ * For every divisor in [2, 1e6), check that div_compute() inverts
+ * multiplication exactly: for each dividend that is a multiple of the
+ * divisor, quotient * divisor must round-trip to the dividend.
+ */
+ for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) {
+ div_info_t div_info;
+ div_init(&div_info, divisor);
+ /*
+ * Scan at least 1000 multiples of the divisor, and always up
+ * to at least 1e6, so small divisors get wide coverage too.
+ */
+ size_t max = 1000 * divisor;
+ if (max < 1000 * 1000) {
+ max = 1000 * 1000;
+ }
+ /*
+ * Bug fix: the loop previously bounded the scan by
+ * 1000 * divisor, leaving the computed 'max' unused and
+ * skipping the intended >= 1e6 coverage for small divisors.
+ */
+ for (size_t dividend = 0; dividend < max;
+ dividend += divisor) {
+ size_t quotient = div_compute(
+ &div_info, dividend);
+ assert_zu_eq(dividend, quotient * divisor,
+ "With divisor = %zu, dividend = %zu, "
+ "got quotient %zu", divisor, dividend, quotient);
+ }
+ }
+}
+TEST_END
+
+/* Run the div test; test_no_reentrancy skips the harness reentrancy pass. */
+int
+main(void) {
+ return test_no_reentrancy(
+ test_div_exhaustive);
+}
diff --git a/deps/jemalloc/test/unit/emitter.c b/deps/jemalloc/test/unit/emitter.c
new file mode 100644
index 0000000..b4a693f
--- /dev/null
+++ b/deps/jemalloc/test/unit/emitter.c
@@ -0,0 +1,469 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/internal/emitter.h"
+
+/*
+ * This is so useful for debugging and feature work, we'll leave printing
+ * functionality committed but disabled by default.
+ */
+/* Print the text as it will appear. */
+static bool print_raw = false;
+/* Print the text escaped, so it can be copied back into the test case. */
+static bool print_escaped = false;
+
+/* Capture buffer handed to the emitter's write callback. */
+typedef struct buf_descriptor_s buf_descriptor_t;
+struct buf_descriptor_s {
+ char *buf; /* Next write position. */
+ size_t len; /* Remaining capacity, including the terminating NUL. */
+ bool mid_quote; /* Escaped-printing state: currently inside a quote. */
+};
+
+/*
+ * Forwards all writes to the passed-in buf_v (which should be cast from a
+ * buf_descriptor_t *).
+ */
+/*
+ * Forwards all writes to the passed-in buf_v (which should be cast from a
+ * buf_descriptor_t *).
+ */
+static void
+forwarding_cb(void *buf_descriptor_v, const char *str) {
+ buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v;
+
+ if (print_raw) {
+ malloc_printf("%s", str);
+ }
+ if (print_escaped) {
+ /* Re-emit str as a C string literal, one quote per line. */
+ const char *it = str;
+ while (*it != '\0') {
+ if (!buf_descriptor->mid_quote) {
+ malloc_printf("\"");
+ buf_descriptor->mid_quote = true;
+ }
+ switch (*it) {
+ case '\\':
+ malloc_printf("\\");
+ break;
+ case '\"':
+ malloc_printf("\\\"");
+ break;
+ case '\t':
+ malloc_printf("\\t");
+ break;
+ case '\n':
+ /* Newline closes the current string literal. */
+ malloc_printf("\\n\"\n");
+ buf_descriptor->mid_quote = false;
+ break;
+ default:
+ malloc_printf("%c", *it);
+ }
+ it++;
+ }
+ }
+
+ /* Append to the capture buffer; malloc_snprintf NUL-terminates. */
+ size_t written = malloc_snprintf(buf_descriptor->buf,
+ buf_descriptor->len, "%s", str);
+ /*
+ * NOTE(review): overflow is detected after the (truncating) write; the
+ * asserts fail the test rather than prevent truncation up front.
+ */
+ assert_zu_eq(written, strlen(str), "Buffer overflow!");
+ buf_descriptor->buf += written;
+ buf_descriptor->len -= written;
+ assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
+}
+
+/*
+ * Run emit_fn twice — once in JSON mode and once in table mode — capturing
+ * each run's output through forwarding_cb, and compare against the expected
+ * strings. The buf_descriptor is reset between the two runs.
+ */
+static void
+assert_emit_output(void (*emit_fn)(emitter_t *),
+ const char *expected_json_output, const char *expected_table_output) {
+ emitter_t emitter;
+ char buf[MALLOC_PRINTF_BUFSIZE];
+ buf_descriptor_t buf_descriptor;
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_json, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ assert_str_eq(expected_json_output, buf, "json output failure");
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_table, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ assert_str_eq(expected_table_output, buf, "table output failure");
+}
+
+/* Emit one dict with plain and note-annotated key-value pairs. */
+static void
+emit_dict(emitter_t *emitter) {
+ bool b_false = false;
+ bool b_true = true;
+ int i_123 = 123;
+ const char *str = "a string";
+
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "foo", "This is the foo table:");
+ emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false);
+ emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true);
+ emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123,
+ "note_key1", emitter_type_string, &str);
+ emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str,
+ "note_key2", emitter_type_bool, &b_false);
+ emitter_dict_end(emitter);
+ emitter_end(emitter);
+}
+/* Expected output: notes are dropped in JSON mode but shown in tables. */
+static const char *dict_json =
+"{\n"
+"\t\"foo\": {\n"
+"\t\t\"abc\": false,\n"
+"\t\t\"def\": true,\n"
+"\t\t\"ghi\": 123,\n"
+"\t\t\"jkl\": \"a string\"\n"
+"\t}\n"
+"}\n";
+static const char *dict_table =
+"This is the foo table:\n"
+" ABC: false\n"
+" DEF: true\n"
+" GHI: 123 (note_key1: \"a string\")\n"
+" JKL: \"a string\" (note_key2: false)\n";
+
+TEST_BEGIN(test_dict) {
+ assert_emit_output(&emit_dict, dict_json, dict_table);
+}
+TEST_END
+
+/* Free-form table text: appears only in table mode, never in JSON. */
+static void
+emit_table_printf(emitter_t *emitter) {
+ emitter_begin(emitter);
+ emitter_table_printf(emitter, "Table note 1\n");
+ emitter_table_printf(emitter, "Table note 2 %s\n",
+ "with format string");
+ emitter_end(emitter);
+}
+
+/* JSON mode emits only the empty top-level object. */
+static const char *table_printf_json =
+"{\n"
+"}\n";
+
+static const char *table_printf_table =
+"Table note 1\n"
+"Table note 2 with format string\n";
+
+TEST_BEGIN(test_table_printf) {
+ assert_emit_output(&emit_table_printf, table_printf_json,
+ table_printf_table);
+}
+TEST_END
+
+/* Nested and sibling dicts, with a primitive at each nesting depth. */
+static void emit_nested_dict(emitter_t *emitter) {
+ int val = 123;
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "json1", "Dict 1");
+ emitter_dict_begin(emitter, "json2", "Dict 2");
+ emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close 2 */
+ emitter_dict_begin(emitter, "json3", "Dict 3");
+ emitter_dict_end(emitter); /* Close 3 */
+ emitter_dict_end(emitter); /* Close 1 */
+ emitter_dict_begin(emitter, "json4", "Dict 4");
+ emitter_kv(emitter, "primitive", "Another primitive",
+ emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close 4 */
+ emitter_end(emitter);
+}
+
+static const char *nested_object_json =
+"{\n"
+"\t\"json1\": {\n"
+"\t\t\"json2\": {\n"
+"\t\t\t\"primitive\": 123\n"
+"\t\t},\n"
+"\t\t\"json3\": {\n"
+"\t\t}\n"
+"\t},\n"
+"\t\"json4\": {\n"
+"\t\t\"primitive\": 123\n"
+"\t}\n"
+"}\n";
+
+/* Table mode renders nesting as two-space indentation steps. */
+static const char *nested_object_table =
+"Dict 1\n"
+" Dict 2\n"
+" A primitive: 123\n"
+" Dict 3\n"
+"Dict 4\n"
+" Another primitive: 123\n";
+
+TEST_BEGIN(test_nested_dict) {
+ assert_emit_output(&emit_nested_dict, nested_object_json,
+ nested_object_table);
+}
+TEST_END
+
+/* One key-value pair per supported emitter value type. */
+static void
+emit_types(emitter_t *emitter) {
+ bool b = false;
+ int i = -123;
+ unsigned u = 123;
+ ssize_t zd = -456;
+ size_t zu = 456;
+ const char *str = "string";
+ uint32_t u32 = 789;
+ uint64_t u64 = 10000000000ULL;
+
+ emitter_begin(emitter);
+ emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b);
+ emitter_kv(emitter, "k2", "K2", emitter_type_int, &i);
+ emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u);
+ emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd);
+ emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu);
+ emitter_kv(emitter, "k6", "K6", emitter_type_string, &str);
+ emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32);
+ emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64);
+ /*
+ * We don't test the title type, since it's only used for tables. It's
+ * tested in the emitter_table_row tests.
+ */
+ emitter_end(emitter);
+}
+
+static const char *types_json =
+"{\n"
+"\t\"k1\": false,\n"
+"\t\"k2\": -123,\n"
+"\t\"k3\": 123,\n"
+"\t\"k4\": -456,\n"
+"\t\"k5\": 456,\n"
+"\t\"k6\": \"string\",\n"
+"\t\"k7\": 789,\n"
+"\t\"k8\": 10000000000\n"
+"}\n";
+
+static const char *types_table =
+"K1: false\n"
+"K2: -123\n"
+"K3: 123\n"
+"K4: -456\n"
+"K5: 456\n"
+"K6: \"string\"\n"
+"K7: 789\n"
+"K8: 10000000000\n";
+
+TEST_BEGIN(test_types) {
+ assert_emit_output(&emit_types, types_json, types_table);
+}
+TEST_END
+
+/*
+ * Interleave JSON-only, table-only, and shared emissions; each mode must
+ * pick out exactly its own subset (compare modal_json vs modal_table).
+ */
+static void
+emit_modal(emitter_t *emitter) {
+ int val = 123;
+ emitter_begin(emitter);
+ emitter_dict_begin(emitter, "j0", "T0");
+ emitter_json_key(emitter, "j1");
+ emitter_json_object_begin(emitter);
+ emitter_kv(emitter, "i1", "I1", emitter_type_int, &val);
+ emitter_json_kv(emitter, "i2", emitter_type_int, &val);
+ emitter_table_kv(emitter, "I3", emitter_type_int, &val);
+ emitter_table_dict_begin(emitter, "T1");
+ emitter_kv(emitter, "i4", "I4", emitter_type_int, &val);
+ emitter_json_object_end(emitter); /* Close j1 */
+ emitter_kv(emitter, "i5", "I5", emitter_type_int, &val);
+ emitter_table_dict_end(emitter); /* Close T1 */
+ emitter_kv(emitter, "i6", "I6", emitter_type_int, &val);
+ emitter_dict_end(emitter); /* Close j0 / T0 */
+ emitter_end(emitter);
+}
+
+const char *modal_json =
+"{\n"
+"\t\"j0\": {\n"
+"\t\t\"j1\": {\n"
+"\t\t\t\"i1\": 123,\n"
+"\t\t\t\"i2\": 123,\n"
+"\t\t\t\"i4\": 123\n"
+"\t\t},\n"
+"\t\t\"i5\": 123,\n"
+"\t\t\"i6\": 123\n"
+"\t}\n"
+"}\n";
+
+const char *modal_table =
+"T0\n"
+" I1: 123\n"
+" I3: 123\n"
+" T1\n"
+" I4: 123\n"
+" I5: 123\n"
+" I6: 123\n";
+
+TEST_BEGIN(test_modal) {
+ assert_emit_output(&emit_modal, modal_json, modal_table);
+}
+TEST_END
+
+/* JSON array mixing object elements and bare primitives. */
+static void
+emit_json_arr(emitter_t *emitter) {
+ int ival = 123;
+
+ emitter_begin(emitter);
+ emitter_json_key(emitter, "dict");
+ emitter_json_object_begin(emitter);
+ emitter_json_key(emitter, "arr");
+ emitter_json_array_begin(emitter);
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "foo", emitter_type_int, &ival);
+ emitter_json_object_end(emitter); /* Close arr[0] */
+ /* arr[1] and arr[2] are primitives. */
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "bar", emitter_type_int, &ival);
+ emitter_json_kv(emitter, "baz", emitter_type_int, &ival);
+ emitter_json_object_end(emitter); /* Close arr[3]. */
+ emitter_json_array_end(emitter); /* Close arr. */
+ emitter_json_object_end(emitter); /* Close dict. */
+ emitter_end(emitter);
+}
+
+static const char *json_array_json =
+"{\n"
+"\t\"dict\": {\n"
+"\t\t\"arr\": [\n"
+"\t\t\t{\n"
+"\t\t\t\t\"foo\": 123\n"
+"\t\t\t},\n"
+"\t\t\t123,\n"
+"\t\t\t123,\n"
+"\t\t\t{\n"
+"\t\t\t\t\"bar\": 123,\n"
+"\t\t\t\t\"baz\": 123\n"
+"\t\t\t}\n"
+"\t\t]\n"
+"\t}\n"
+"}\n";
+
+/* JSON-only content produces no table output at all. */
+static const char *json_array_table = "";
+
+TEST_BEGIN(test_json_arr) {
+ assert_emit_output(&emit_json_arr, json_array_json, json_array_table);
+}
+TEST_END
+
+/*
+ * Arrays nested directly inside arrays, including an empty innermost one.
+ * Reuses json_array_table (""): JSON-only content emits no table output.
+ */
+static void
+emit_json_nested_array(emitter_t *emitter) {
+ int ival = 123;
+ /*
+ * Const-correctness fix: the string literal was previously bound to a
+ * non-const char *; emitter_type_string values are read through a
+ * const char * elsewhere in this file (see emit_types()).
+ */
+ const char *sval = "foo";
+ emitter_begin(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_value(emitter, emitter_type_string, &sval);
+ emitter_json_value(emitter, emitter_type_int, &ival);
+ emitter_json_array_end(emitter);
+ emitter_json_array_begin(emitter);
+ emitter_json_array_end(emitter);
+ emitter_json_array_end(emitter);
+ emitter_end(emitter);
+}
+
+static const char *json_nested_array_json =
+"{\n"
+"\t[\n"
+"\t\t[\n"
+"\t\t\t123,\n"
+"\t\t\t\"foo\",\n"
+"\t\t\t123,\n"
+"\t\t\t\"foo\"\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t\t123\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t\t\"foo\",\n"
+"\t\t\t123\n"
+"\t\t],\n"
+"\t\t[\n"
+"\t\t]\n"
+"\t]\n"
+"}\n";
+
+TEST_BEGIN(test_json_nested_arr) {
+ assert_emit_output(&emit_json_nested_array, json_nested_array_json,
+ json_array_table);
+}
+TEST_END
+
+/*
+ * Table rows with fixed-width justified columns; column cells are retyped
+ * and re-valued between rows while the row structure is reused.
+ */
+static void
+emit_table_row(emitter_t *emitter) {
+ emitter_begin(emitter);
+ emitter_row_t row;
+ /*
+ * NOTE(review): the trailing {0}, {0, 0} initializers presumably zero
+ * the value union and link fields of emitter_col_t — confirm against
+ * the struct definition in emitter.h.
+ */
+ emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
+ abc.str_val = "ABC title";
+ emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
+ def.str_val = "DEF title";
+ emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
+ ghi.str_val = "GHI";
+
+ emitter_row_init(&row);
+ emitter_col_init(&abc, &row);
+ emitter_col_init(&def, &row);
+ emitter_col_init(&ghi, &row);
+
+ /* Header row: all three columns are titles. */
+ emitter_table_row(emitter, &row);
+
+ abc.type = emitter_type_int;
+ def.type = emitter_type_bool;
+ ghi.type = emitter_type_int;
+
+ abc.int_val = 123;
+ def.bool_val = true;
+ ghi.int_val = 456;
+ emitter_table_row(emitter, &row);
+
+ abc.int_val = 789;
+ def.bool_val = false;
+ ghi.int_val = 1011;
+ emitter_table_row(emitter, &row);
+
+ /* Mixed types within one row, including a title-typed cell. */
+ abc.type = emitter_type_string;
+ abc.str_val = "a string";
+ def.bool_val = false;
+ ghi.type = emitter_type_title;
+ ghi.str_val = "ghi";
+ emitter_table_row(emitter, &row);
+
+ emitter_end(emitter);
+}
+
+static const char *table_row_json =
+"{\n"
+"}\n";
+
+static const char *table_row_table =
+"ABC title DEF title GHI\n"
+"123 true 456\n"
+"789 false 1011\n"
+"\"a string\" false ghi\n";
+
+TEST_BEGIN(test_table_row) {
+ assert_emit_output(&emit_table_row, table_row_json, table_row_table);
+}
+TEST_END
+
+/* Run all emitter tests without the harness reentrancy pass. */
+int
+main(void) {
+ return test_no_reentrancy(
+ test_dict,
+ test_table_printf,
+ test_nested_dict,
+ test_types,
+ test_modal,
+ test_json_arr,
+ test_json_nested_arr,
+ test_table_row);
+}
diff --git a/deps/jemalloc/test/unit/extent_quantize.c b/deps/jemalloc/test/unit/extent_quantize.c
new file mode 100644
index 0000000..0ca7a75
--- /dev/null
+++ b/deps/jemalloc/test/unit/extent_quantize.c
@@ -0,0 +1,141 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_small_extent_size) {
+ unsigned nbins, i;
+ size_t sz, extent_size;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all small size classes, get their extent sizes, and
+ * verify that the quantized size is the same as the extent size.
+ */
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ /* Resolve the MIB once; mib[2] selects the bin index below. */
+ assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nbins; i++) {
+ mib[2] = i;
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib failure");
+ assert_zu_eq(extent_size,
+ extent_size_quantize_floor(extent_size),
+ "Small extent quantization should be a no-op "
+ "(extent_size=%zu)", extent_size);
+ assert_zu_eq(extent_size,
+ extent_size_quantize_ceil(extent_size),
+ "Small extent quantization should be a no-op "
+ "(extent_size=%zu)", extent_size);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_extent_size) {
+ bool cache_oblivious;
+ unsigned nlextents, i;
+ /*
+ * NOTE(review): extent_size_prev/ceil_prev are only read when i > 0,
+ * by which point the i == 0 iteration has assigned them (because
+ * 0 + 1 < nlextents held); some compilers may still warn.
+ */
+ size_t sz, extent_size_prev, ceil_prev;
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+
+ /*
+ * Iterate over all large size classes, get their extent sizes, and
+ * verify that the quantized size is the same as the extent size.
+ */
+
+ sz = sizeof(bool);
+ assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
+ &sz, NULL, 0), 0, "Unexpected mallctl failure");
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib failure");
+ for (i = 0; i < nlextents; i++) {
+ size_t lextent_size, extent_size, floor, ceil;
+
+ mib[2] = i;
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
+ &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
+ /* Cache-oblivious builds add one page per large extent. */
+ extent_size = cache_oblivious ? lextent_size + PAGE :
+ lextent_size;
+ floor = extent_size_quantize_floor(extent_size);
+ ceil = extent_size_quantize_ceil(extent_size);
+
+ assert_zu_eq(extent_size, floor,
+ "Extent quantization should be a no-op for precise size "
+ "(lextent_size=%zu, extent_size=%zu)", lextent_size,
+ extent_size);
+ assert_zu_eq(extent_size, ceil,
+ "Extent quantization should be a no-op for precise size "
+ "(lextent_size=%zu, extent_size=%zu)", lextent_size,
+ extent_size);
+
+ if (i > 0) {
+ assert_zu_eq(extent_size_prev,
+ extent_size_quantize_floor(extent_size - PAGE),
+ "Floor should be a precise size");
+ if (extent_size_prev < ceil_prev) {
+ assert_zu_eq(ceil_prev, extent_size,
+ "Ceiling should be a precise size "
+ "(extent_size_prev=%zu, ceil_prev=%zu, "
+ "extent_size=%zu)", extent_size_prev,
+ ceil_prev, extent_size);
+ }
+ }
+ if (i + 1 < nlextents) {
+ extent_size_prev = floor;
+ ceil_prev = extent_size_quantize_ceil(extent_size +
+ PAGE);
+ }
+ }
+}
+TEST_END
+
+/*
+ * Quantization sanity over every page-multiple up to 4 MiB: floor <= size,
+ * ceil >= size, and both functions are monotonically non-decreasing.
+ */
+TEST_BEGIN(test_monotonic) {
+#define SZ_MAX ZU(4 * 1024 * 1024)
+ unsigned i;
+ size_t floor_prev, ceil_prev;
+
+ floor_prev = 0;
+ ceil_prev = 0;
+ for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
+ size_t extent_size, floor, ceil;
+
+ extent_size = i << LG_PAGE;
+ floor = extent_size_quantize_floor(extent_size);
+ ceil = extent_size_quantize_ceil(extent_size);
+
+ assert_zu_le(floor, extent_size,
+ "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
+ floor, extent_size, ceil);
+ assert_zu_ge(ceil, extent_size,
+ "Ceiling should be >= (floor=%zu, extent_size=%zu, "
+ "ceil=%zu)", floor, extent_size, ceil);
+
+ assert_zu_le(floor_prev, floor, "Floor should be monotonic "
+ "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
+ floor_prev, floor, extent_size, ceil);
+ assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
+ "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
+ floor, extent_size, ceil_prev, ceil);
+
+ floor_prev = floor;
+ ceil_prev = ceil;
+ }
+}
+TEST_END
+
+/* Run the extent size quantization tests. */
+int
+main(void) {
+ return test(
+ test_small_extent_size,
+ test_large_extent_size,
+ test_monotonic);
+}
diff --git a/deps/jemalloc/test/unit/extent_util.c b/deps/jemalloc/test/unit/extent_util.c
new file mode 100644
index 0000000..97e55f0
--- /dev/null
+++ b/deps/jemalloc/test/unit/extent_util.c
@@ -0,0 +1,269 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Invoke experimental.utilization.<node> with the given mallctl arguments,
+ * expect EINVAL, and verify that neither the output buffer nor the reported
+ * output size was modified. Relies on out, out_sz, out_ref and out_sz_ref
+ * being in scope at the expansion site.
+ */
+#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \
+ assert_d_eq(mallctl("experimental.utilization." node, \
+ a, b, c, d), EINVAL, "Should fail when " why_inval); \
+ assert_zu_eq(out_sz, out_sz_ref, \
+ "Output size touched when given invalid arguments"); \
+ assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \
+ "Output content touched when given invalid arguments"); \
+} while (0)
+
+#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \
+ TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
+#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \
+ TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
+
+/*
+ * Invoke the node with the in-scope out/in buffers, expect success, and
+ * verify that the output was actually written (differs from out_ref).
+ */
+#define TEST_UTIL_VALID(node) do { \
+ assert_d_eq(mallctl("experimental.utilization." node, \
+ out, &out_sz, in, in_sz), 0, \
+ "Should return 0 on correct arguments"); \
+ assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
+ assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
+ "Output content should be changed"); \
+} while (0)
+
+#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")
+
+/* Upper bound on tested allocation sizes (1 MiB). */
+#define TEST_MAX_SIZE (1 << 20)
+
+TEST_BEGIN(test_query) {
+ size_t sz;
+ /*
+ * Select some sizes that can span both small and large sizes, and are
+ * numerically unrelated to any size boundaries.
+ */
+ for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
+ sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) {
+ void *p = mallocx(sz, 0);
+ void **in = &p;
+ size_t in_sz = sizeof(const void *);
+ /* Output layout: slabcur pointer followed by 5 counters. */
+ size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
+ void *out = mallocx(out_sz, 0);
+ void *out_ref = mallocx(out_sz, 0);
+ size_t out_sz_ref = out_sz;
+
+ assert_ptr_not_null(p,
+ "test pointer allocation failed");
+ assert_ptr_not_null(out,
+ "test output allocation failed");
+ assert_ptr_not_null(out_ref,
+ "test reference output allocation failed");
+
+/* Accessors into the query output buffer (see layout above). */
+#define SLABCUR_READ(out) (*(void **)out)
+#define COUNTS(out) ((size_t *)((void **)out + 1))
+#define NFREE_READ(out) COUNTS(out)[0]
+#define NREGS_READ(out) COUNTS(out)[1]
+#define SIZE_READ(out) COUNTS(out)[2]
+#define BIN_NFREE_READ(out) COUNTS(out)[3]
+#define BIN_NREGS_READ(out) COUNTS(out)[4]
+
+ /* Poison the buffer, then snapshot it for the EINVAL checks. */
+ SLABCUR_READ(out) = NULL;
+ NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
+ BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
+ memcpy(out_ref, out, out_sz);
+
+ /* Test invalid argument(s) errors */
+ TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz,
+ "old is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz,
+ "oldlenp is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz,
+ "newp is NULL");
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0,
+ "newlen is zero");
+ in_sz -= 1;
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
+ "invalid newlen");
+ in_sz += 1;
+ out_sz_ref = out_sz -= 2 * sizeof(size_t);
+ TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
+ "invalid *oldlenp");
+ out_sz_ref = out_sz += 2 * sizeof(size_t);
+
+ /* Examine output for valid call */
+ TEST_UTIL_VALID("query");
+ assert_zu_le(sz, SIZE_READ(out),
+ "Extent size should be at least allocation size");
+ assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
+ "Extent size should be a multiple of page size");
+ if (sz <= SC_SMALL_MAXCLASS) {
+ assert_zu_le(NFREE_READ(out), NREGS_READ(out),
+ "Extent free count exceeded region count");
+ assert_zu_le(NREGS_READ(out), SIZE_READ(out),
+ "Extent region count exceeded size");
+ assert_zu_ne(NREGS_READ(out), 0,
+ "Extent region count must be positive");
+ assert_ptr_not_null(SLABCUR_READ(out),
+ "Current slab is null");
+ assert_true(NFREE_READ(out) == 0
+ || SLABCUR_READ(out) <= p,
+ "Allocation should follow first fit principle");
+ if (config_stats) {
+ assert_zu_le(BIN_NFREE_READ(out),
+ BIN_NREGS_READ(out),
+ "Bin free count exceeded region count");
+ assert_zu_ne(BIN_NREGS_READ(out), 0,
+ "Bin region count must be positive");
+ assert_zu_le(NFREE_READ(out),
+ BIN_NFREE_READ(out),
+ "Extent free count exceeded bin free count");
+ assert_zu_le(NREGS_READ(out),
+ BIN_NREGS_READ(out),
+ "Extent region count exceeded "
+ "bin region count");
+ assert_zu_eq(BIN_NREGS_READ(out)
+ % NREGS_READ(out), 0,
+ "Bin region count isn't a multiple of "
+ "extent region count");
+ assert_zu_le(
+ BIN_NFREE_READ(out) - NFREE_READ(out),
+ BIN_NREGS_READ(out) - NREGS_READ(out),
+ "Free count in other extents in the bin "
+ "exceeded region count in other extents "
+ "in the bin");
+ assert_zu_le(NREGS_READ(out) - NFREE_READ(out),
+ BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
+ "Extent utilized count exceeded "
+ "bin utilized count");
+ }
+ } else {
+ /* Large allocations: one region, no slab, no bin. */
+ assert_zu_eq(NFREE_READ(out), 0,
+ "Extent free count should be zero");
+ assert_zu_eq(NREGS_READ(out), 1,
+ "Extent region count should be one");
+ assert_ptr_null(SLABCUR_READ(out),
+ "Current slab must be null for large size classes");
+ if (config_stats) {
+ assert_zu_eq(BIN_NFREE_READ(out), 0,
+ "Bin free count must be zero for "
+ "large sizes");
+ assert_zu_eq(BIN_NREGS_READ(out), 0,
+ "Bin region count must be zero for "
+ "large sizes");
+ }
+ }
+
+#undef BIN_NREGS_READ
+#undef BIN_NFREE_READ
+#undef SIZE_READ
+#undef NREGS_READ
+#undef NFREE_READ
+#undef COUNTS
+#undef SLABCUR_READ
+
+ free(out_ref);
+ free(out);
+ free(p);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_batch) {
+ size_t sz;
+ /*
+ * Select some sizes that can span both small and large sizes, and are
+ * numerically unrelated to any size boundaries.
+ */
+ for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
+ sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) {
+ void *p = mallocx(sz, 0);
+ void *q = mallocx(sz, 0);
+ void *in[] = {p, q};
+ size_t in_sz = sizeof(const void *) * 2;
+ /* 3 counters (nfree, nregs, size) per input pointer. */
+ size_t out[] = {-1, -1, -1, -1, -1, -1};
+ size_t out_sz = sizeof(size_t) * 6;
+ size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
+ size_t out_sz_ref = out_sz;
+
+ assert_ptr_not_null(p, "test pointer allocation failed");
+ assert_ptr_not_null(q, "test pointer allocation failed");
+
+ /* Test invalid argument(s) errors */
+ TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
+ "old is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz,
+ "oldlenp is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz,
+ "newp is NULL");
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0,
+ "newlen is zero");
+ in_sz -= 1;
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "newlen is not an exact multiple");
+ in_sz += 1;
+ out_sz_ref = out_sz -= 2 * sizeof(size_t);
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "*oldlenp is not an exact multiple");
+ out_sz_ref = out_sz += 2 * sizeof(size_t);
+ in_sz -= sizeof(const void *);
+ TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
+ "*oldlenp and newlen do not match");
+ in_sz += sizeof(const void *);
+
+ /* Examine output for valid calls */
+/*
+ * NOTE(review): memcmp length here is 3 *bytes*, not 3 * sizeof(size_t),
+ * so only the first 3 bytes of entry i are compared — likely the intent
+ * was 3 * sizeof(size_t); confirm against upstream jemalloc.
+ */
+#define TEST_EQUAL_REF(i, message) \
+ assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message)
+
+#define NFREE_READ(out, i) out[(i) * 3]
+#define NREGS_READ(out, i) out[(i) * 3 + 1]
+#define SIZE_READ(out, i) out[(i) * 3 + 2]
+
+ /* First call: query only p (half the input/output sizes). */
+ out_sz_ref = out_sz /= 2;
+ in_sz /= 2;
+ TEST_UTIL_BATCH_VALID;
+ assert_zu_le(sz, SIZE_READ(out, 0),
+ "Extent size should be at least allocation size");
+ assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
+ "Extent size should be a multiple of page size");
+ if (sz <= SC_SMALL_MAXCLASS) {
+ assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
+ "Extent free count exceeded region count");
+ assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
+ "Extent region count exceeded size");
+ assert_zu_ne(NREGS_READ(out, 0), 0,
+ "Extent region count must be positive");
+ } else {
+ assert_zu_eq(NFREE_READ(out, 0), 0,
+ "Extent free count should be zero");
+ assert_zu_eq(NREGS_READ(out, 0), 1,
+ "Extent region count should be one");
+ }
+ TEST_EQUAL_REF(1,
+ "Should not overwrite content beyond what's needed");
+ in_sz *= 2;
+ out_sz_ref = out_sz *= 2;
+
+ /* Second call: query both p and q. */
+ memcpy(out_ref, out, 3 * sizeof(size_t));
+ TEST_UTIL_BATCH_VALID;
+ TEST_EQUAL_REF(0, "Statistics should be stable across calls");
+ if (sz <= SC_SMALL_MAXCLASS) {
+ assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
+ "Extent free count exceeded region count");
+ } else {
+ /*
+ * NOTE(review): checks entry 0 here while the small-size
+ * branch checks entry 1 — possibly intended (out, 1).
+ */
+ assert_zu_eq(NFREE_READ(out, 0), 0,
+ "Extent free count should be zero");
+ }
+ assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
+ "Extent region count should be same for same region size");
+ assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
+ "Extent size should be same for same region size");
+
+#undef SIZE_READ
+#undef NREGS_READ
+#undef NFREE_READ
+
+#undef TEST_EQUAL_REF
+
+ free(q);
+ free(p);
+ }
+}
+TEST_END
+
+/* Ensure TEST_MAX_SIZE actually spans into large classes, then run. */
+int
+main(void) {
+ assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE,
+ "Test case cannot cover large classes");
+ return test(test_query, test_batch);
+}
diff --git a/deps/jemalloc/test/unit/fork.c b/deps/jemalloc/test/unit/fork.c
new file mode 100644
index 0000000..b169075
--- /dev/null
+++ b/deps/jemalloc/test/unit/fork.c
@@ -0,0 +1,141 @@
+#include "test/jemalloc_test.h"
+
+#ifndef _WIN32
+#include <sys/wait.h>
+#endif
+
+#ifndef _WIN32
+/*
+ * Block until the child exits; fail the test if it was killed by a signal
+ * or exited with a nonzero status.
+ * NOTE(review): parameter is int rather than pid_t — works on common
+ * platforms, but pid_t would be the portable choice.
+ */
+static void
+wait_for_child_exit(int pid) {
+ int status;
+ while (true) {
+ if (waitpid(pid, &status, 0) == -1) {
+ test_fail("Unexpected waitpid() failure.");
+ }
+ if (WIFSIGNALED(status)) {
+ test_fail("Unexpected child termination due to "
+ "signal %d", WTERMSIG(status));
+ break;
+ }
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) != 0) {
+ test_fail("Unexpected child exit value %d",
+ WEXITSTATUS(status));
+ }
+ break;
+ }
+ }
+}
+#endif
+
+/*
+ * Fork with a live allocation and verify the allocator still works in the
+ * parent afterwards (free + malloc + free across the fork).
+ */
+TEST_BEGIN(test_fork) {
+#ifndef _WIN32
+ void *p;
+ pid_t pid;
+
+ /* Set up a manually managed arena for test. */
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ /* Migrate to the new arena. */
+ unsigned old_arena_ind;
+ sz = sizeof(old_arena_ind);
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+ pid = fork();
+
+ /* Both parent and child exercise the allocator after the fork. */
+ free(p);
+
+ p = malloc(64);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+ free(p);
+
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ _exit(0);
+ } else {
+ wait_for_child_exit(pid);
+ }
+#else
+ test_skip("fork(2) is irrelevant to Windows");
+#endif
+}
+TEST_END
+
+#ifndef _WIN32
+/*
+ * Thread body: allocate, fork, and have the child exec /bin/true (or fail);
+ * the parent reaps the child. Used to stress fork from multiple threads.
+ */
+static void *
+do_fork_thd(void *arg) {
+ malloc(1);
+ int pid = fork();
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure");
+ } else if (pid == 0) {
+ /* Child. */
+ char *args[] = {"true", NULL};
+ execvp(args[0], args);
+ /* Only reached if execvp() itself failed. */
+ test_fail("Exec failed");
+ } else {
+ /* Parent */
+ wait_for_child_exit(pid);
+ }
+ return NULL;
+}
+#endif
+
+#ifndef _WIN32
+/*
+ * Fork concurrently from a spawned thread and from the calling thread,
+ * reaping both children (each via do_fork_thd).
+ */
+static void
+do_test_fork_multithreaded(void) {
+ /*
+ * Fix: declare the prototype as (void); empty parentheses are an
+ * obsolescent C declarator that leaves the parameter list unspecified.
+ */
+ thd_t child;
+ thd_create(&child, do_fork_thd, NULL);
+ do_fork_thd(NULL);
+ thd_join(child, NULL);
+}
+#endif
+
+TEST_BEGIN(test_fork_multithreaded) {
+#ifndef _WIN32
+ /*
+ * We've seen bugs involving hanging on arenas_lock (though the same
+ * class of bugs can happen on any mutex). The bugs are intermittent
+ * though, so we want to run the test multiple times. Since we hold the
+ * arenas lock only early in the process lifetime, we can't just run
+ * this test in a loop (since, after all the arenas are initialized, we
+ * won't acquire arenas_lock any further). We therefore repeat the test
+ * with multiple processes.
+ */
+ for (int i = 0; i < 100; i++) {
+ int pid = fork();
+ if (pid == -1) {
+ /* Error. */
+ test_fail("Unexpected fork() failure,");
+ } else if (pid == 0) {
+ /* Child: fresh process, so arenas_lock is hot again. */
+ do_test_fork_multithreaded();
+ _exit(0);
+ } else {
+ wait_for_child_exit(pid);
+ }
+ }
+#else
+ test_skip("fork(2) is irrelevant to Windows");
+#endif
+}
+TEST_END
+
+/* Run the fork tests without the harness reentrancy pass. */
+int
+main(void) {
+ return test_no_reentrancy(
+ test_fork,
+ test_fork_multithreaded);
+}
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
new file mode 100644
index 0000000..7cc034f
--- /dev/null
+++ b/deps/jemalloc/test/unit/hash.c
@@ -0,0 +1,173 @@
+/*
+ * This file is based on code that is part of SMHasher
+ * (https://code.google.com/p/smhasher/), and is subject to the MIT license
+ * (http://www.opensource.org/licenses/mit-license.php). Both email addresses
+ * associated with the source code's revision history belong to Austin Appleby,
+ * and the revision history ranges from 2010 to 2012. Therefore the copyright
+ * and license are here taken to be:
+ *
+ * Copyright (c) 2010-2012 Austin Appleby
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "test/jemalloc_test.h"
+#include "jemalloc/internal/hash.h"
+
+typedef enum {
+ hash_variant_x86_32,
+ hash_variant_x86_128,
+ hash_variant_x64_128
+} hash_variant_t;
+
+static int
+hash_variant_bits(hash_variant_t variant) {
+ switch (variant) {
+ case hash_variant_x86_32: return 32;
+ case hash_variant_x86_128: return 128;
+ case hash_variant_x64_128: return 128;
+ default: not_reached();
+ }
+}
+
+static const char *
+hash_variant_string(hash_variant_t variant) {
+ switch (variant) {
+ case hash_variant_x86_32: return "hash_x86_32";
+ case hash_variant_x86_128: return "hash_x86_128";
+ case hash_variant_x64_128: return "hash_x64_128";
+ default: not_reached();
+ }
+}
+
+#define KEY_SIZE 256
+static void
+hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
+ const int hashbytes = hash_variant_bits(variant) / 8;
+ const int hashes_size = hashbytes * 256;
+ VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
+ VARIABLE_ARRAY(uint8_t, final, hashbytes);
+ unsigned i;
+ uint32_t computed, expected;
+
+ memset(key, 0, KEY_SIZE);
+ memset(hashes, 0, hashes_size);
+ memset(final, 0, hashbytes);
+
+ /*
+ * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
+ * seed.
+ */
+ for (i = 0; i < 256; i++) {
+ key[i] = (uint8_t)i;
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out;
+ out = hash_x86_32(key, i, 256-i);
+ memcpy(&hashes[i*hashbytes], &out, hashbytes);
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } default: not_reached();
+ }
+ }
+
+ /* Hash the result array. */
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out = hash_x86_32(hashes, hashes_size, 0);
+ memcpy(final, &out, sizeof(out));
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(hashes, hashes_size, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } default: not_reached();
+ }
+
+ computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
+ (final[3] << 24);
+
+ switch (variant) {
+#ifdef JEMALLOC_BIG_ENDIAN
+ case hash_variant_x86_32: expected = 0x6213303eU; break;
+ case hash_variant_x86_128: expected = 0x266820caU; break;
+ case hash_variant_x64_128: expected = 0xcc622b6fU; break;
+#else
+ case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
+ case hash_variant_x86_128: expected = 0xb3ece62aU; break;
+ case hash_variant_x64_128: expected = 0x6384ba69U; break;
+#endif
+ default: not_reached();
+ }
+
+ assert_u32_eq(computed, expected,
+ "Hash mismatch for %s(): expected %#x but got %#x",
+ hash_variant_string(variant), expected, computed);
+}
+
+static void
+hash_variant_verify(hash_variant_t variant) {
+#define MAX_ALIGN 16
+ uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
+ unsigned i;
+
+ for (i = 0; i < MAX_ALIGN; i++) {
+ hash_variant_verify_key(variant, &key[i]);
+ }
+#undef MAX_ALIGN
+}
+#undef KEY_SIZE
+
+TEST_BEGIN(test_hash_x86_32) {
+ hash_variant_verify(hash_variant_x86_32);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x86_128) {
+ hash_variant_verify(hash_variant_x86_128);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x64_128) {
+ hash_variant_verify(hash_variant_x64_128);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_hash_x86_32,
+ test_hash_x86_128,
+ test_hash_x64_128);
+}
diff --git a/deps/jemalloc/test/unit/hook.c b/deps/jemalloc/test/unit/hook.c
new file mode 100644
index 0000000..72fcc43
--- /dev/null
+++ b/deps/jemalloc/test/unit/hook.c
@@ -0,0 +1,580 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/hook.h"
+
+static void *arg_extra;
+static int arg_type;
+static void *arg_result;
+static void *arg_address;
+static size_t arg_old_usize;
+static size_t arg_new_usize;
+static uintptr_t arg_result_raw;
+static uintptr_t arg_args_raw[4];
+
+static int call_count = 0;
+
+static void
+reset_args() {
+ arg_extra = NULL;
+ arg_type = 12345;
+ arg_result = NULL;
+ arg_address = NULL;
+ arg_old_usize = 0;
+ arg_new_usize = 0;
+ arg_result_raw = 0;
+ memset(arg_args_raw, 77, sizeof(arg_args_raw));
+}
+
+static void
+alloc_free_size(size_t sz) {
+ void *ptr = mallocx(1, 0);
+ free(ptr);
+ ptr = mallocx(1, 0);
+ free(ptr);
+ ptr = mallocx(1, MALLOCX_TCACHE_NONE);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+}
+
+/*
+ * We want to support a degree of user reentrancy. This tests a variety of
+ * allocation scenarios.
+ */
+static void
+be_reentrant() {
+ /* Let's make sure the tcache is non-empty if enabled. */
+ alloc_free_size(1);
+ alloc_free_size(1024);
+ alloc_free_size(64 * 1024);
+ alloc_free_size(256 * 1024);
+ alloc_free_size(1024 * 1024);
+
+ /* Some reallocation. */
+ void *ptr = mallocx(129, 0);
+ ptr = rallocx(ptr, 130, 0);
+ free(ptr);
+
+ ptr = mallocx(2 * 1024 * 1024, 0);
+ free(ptr);
+ ptr = mallocx(1 * 1024 * 1024, 0);
+ ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
+ free(ptr);
+
+ ptr = mallocx(1, 0);
+ ptr = rallocx(ptr, 1000, 0);
+ free(ptr);
+}
+
+static void
+set_args_raw(uintptr_t *args_raw, int nargs) {
+ memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
+}
+
+static void
+assert_args_raw(uintptr_t *args_raw_expected, int nargs) {
+ int cmp = memcmp(args_raw_expected, arg_args_raw,
+ sizeof(uintptr_t) * nargs);
+ assert_d_eq(cmp, 0, "Raw args mismatch");
+}
+
+static void
+reset() {
+ call_count = 0;
+ reset_args();
+}
+
+static void
+test_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ uintptr_t result_raw, uintptr_t args_raw[3]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_result = result;
+ arg_result_raw = result_raw;
+ set_args_raw(args_raw, 3);
+ be_reentrant();
+}
+
+static void
+test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_address = address;
+ set_args_raw(args_raw, 3);
+ be_reentrant();
+}
+
+static void
+test_expand_hook(void *extra, hook_expand_t type, void *address,
+ size_t old_usize, size_t new_usize, uintptr_t result_raw,
+ uintptr_t args_raw[4]) {
+ call_count++;
+ arg_extra = extra;
+ arg_type = (int)type;
+ arg_address = address;
+ arg_old_usize = old_usize;
+ arg_new_usize = new_usize;
+ arg_result_raw = result_raw;
+ set_args_raw(args_raw, 4);
+ be_reentrant();
+}
+
+TEST_BEGIN(test_hooks_basic) {
+ /* Just verify that the record their arguments correctly. */
+ hooks_t hooks = {
+ &test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
+ (void *)111};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+
+ /* Alloc */
+ reset_args();
+ hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
+ args_raw);
+ assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
+ assert_d_eq((int)hook_alloc_posix_memalign, arg_type,
+ "Passed wrong alloc type");
+ assert_ptr_eq((void *)222, arg_result, "Passed wrong result address");
+ assert_u64_eq(333, arg_result_raw, "Passed wrong result");
+ assert_args_raw(args_raw, 3);
+
+ /* Dalloc */
+ reset_args();
+ hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
+ assert_d_eq((int)hook_dalloc_sdallocx, arg_type,
+ "Passed wrong dalloc type");
+ assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ assert_args_raw(args_raw, 3);
+
+ /* Expand */
+ reset_args();
+ hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
+ args_raw);
+ assert_d_eq((int)hook_expand_xallocx, arg_type,
+ "Passed wrong expand type");
+ assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ assert_zu_eq(333, arg_old_usize, "Passed wrong old usize");
+ assert_zu_eq(444, arg_new_usize, "Passed wrong new usize");
+ assert_zu_eq(555, arg_result_raw, "Passed wrong result");
+ assert_args_raw(args_raw, 4);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_null) {
+ /* Null hooks should be ignored, not crash. */
+ hooks_t hooks1 = {NULL, NULL, NULL, NULL};
+ hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
+ hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
+ hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};
+
+ void *handle1 = hook_install(TSDN_NULL, &hooks1);
+ void *handle2 = hook_install(TSDN_NULL, &hooks2);
+ void *handle3 = hook_install(TSDN_NULL, &hooks3);
+ void *handle4 = hook_install(TSDN_NULL, &hooks4);
+
+ assert_ptr_ne(handle1, NULL, "Hook installation failed");
+ assert_ptr_ne(handle2, NULL, "Hook installation failed");
+ assert_ptr_ne(handle3, NULL, "Hook installation failed");
+ assert_ptr_ne(handle4, NULL, "Hook installation failed");
+
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+
+ call_count = 0;
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
+ assert_d_eq(call_count, 1, "Called wrong number of times");
+
+ call_count = 0;
+ hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
+ assert_d_eq(call_count, 1, "Called wrong number of times");
+
+ call_count = 0;
+ hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
+ assert_d_eq(call_count, 1, "Called wrong number of times");
+
+ hook_remove(TSDN_NULL, handle1);
+ hook_remove(TSDN_NULL, handle2);
+ hook_remove(TSDN_NULL, handle3);
+ hook_remove(TSDN_NULL, handle4);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_remove) {
+ hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+ call_count = 0;
+ uintptr_t args_raw[4] = {10, 20, 30, 40};
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
+ assert_d_eq(call_count, 1, "Hook not invoked");
+
+ call_count = 0;
+ hook_remove(TSDN_NULL, handle);
+ hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
+ assert_d_eq(call_count, 0, "Hook invoked after removal");
+
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_alloc_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+
+ /* Stop malloc from being optimized away. */
+ volatile int err;
+ void *volatile ptr;
+
+ /* malloc */
+ reset();
+ ptr = malloc(1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ free(ptr);
+
+ /* posix_memalign */
+ reset();
+ err = posix_memalign((void **)&ptr, 1024, 1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_posix_memalign,
+ "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
+ free(ptr);
+
+ /* aligned_alloc */
+ reset();
+ ptr = aligned_alloc(1024, 1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
+ "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* calloc */
+ reset();
+ ptr = calloc(11, 13);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* memalign */
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+ reset();
+ ptr = memalign(1024, 1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
+
+ /* valloc */
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+ reset();
+ ptr = valloc(1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ free(ptr);
+#endif /* JEMALLOC_OVERRIDE_VALLOC */
+
+ /* mallocx */
+ reset();
+ ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
+ "Wrong flags");
+ free(ptr);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_dalloc_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* free() */
+ reset();
+ ptr = malloc(1);
+ free(ptr);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+
+ /* dallocx() */
+ reset();
+ ptr = malloc(1);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
+ "Wrong raw arg");
+
+ /* sdallocx() */
+ reset();
+ ptr = malloc(1);
+ sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
+ assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
+ "Wrong raw arg");
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_expand_simple) {
+ /* "Simple" in the sense that we're not in a realloc variant. */
+ hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* xallocx() */
+ reset();
+ ptr = malloc(1);
+ size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
+ assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
+ assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
+ assert_u64_eq(new_usize, arg_result_raw, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
+ assert_u64_eq(100, arg_args_raw[1], "Wrong arg");
+ assert_u64_eq(200, arg_args_raw[2], "Wrong arg");
+ assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
+ hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
+ &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+
+ /* realloc(NULL, size) as malloc */
+ reset();
+ ptr = realloc(NULL, 1);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /* realloc(ptr, 0) as free */
+ ptr = malloc(1);
+ reset();
+ realloc(ptr, 0);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_dalloc_realloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg");
+
+ /* realloc(NULL, 0) as malloc(0) */
+ reset();
+ ptr = realloc(NULL, 0);
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_result, "Wrong result");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ hook_remove(TSDN_NULL, handle);
+}
+TEST_END
+
+static void
+do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
+ int expand_type, int dalloc_type) {
+ hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
+ &test_expand_hook, (void *)123};
+ void *handle = hook_install(TSDN_NULL, &hooks);
+ assert_ptr_ne(handle, NULL, "Hook installation failed");
+
+ void *volatile ptr;
+ void *volatile ptr2;
+
+ /* Realloc in-place, small. */
+ ptr = malloc(129);
+ reset();
+ ptr2 = ralloc(ptr, 130, flags);
+ assert_ptr_eq(ptr, ptr2, "Small realloc moved");
+
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, expand_type, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong address");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
+ free(ptr);
+
+ /*
+ * Realloc in-place, large. Since we can't guarantee the large case
+ * across all platforms, we stay resilient to moving results.
+ */
+ ptr = malloc(2 * 1024 * 1024);
+ free(ptr);
+ ptr2 = malloc(1 * 1024 * 1024);
+ reset();
+ ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
+ /* ptr is the new address, ptr2 is the old address. */
+ if (ptr == ptr2) {
+ assert_d_eq(call_count, 1, "Hook not called");
+ assert_d_eq(arg_type, expand_type, "Wrong hook type");
+ } else {
+ assert_d_eq(call_count, 2, "Wrong hooks called");
+ assert_ptr_eq(ptr, arg_result, "Wrong address");
+ assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ }
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_ptr_eq(ptr2, arg_address, "Wrong address");
+ assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ "Wrong argument");
+ free(ptr);
+
+ /* Realloc with move, small. */
+ ptr = malloc(8);
+ reset();
+ ptr2 = ralloc(ptr, 128, flags);
+ assert_ptr_ne(ptr, ptr2, "Small realloc didn't move");
+
+ assert_d_eq(call_count, 2, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong address");
+ assert_ptr_eq(ptr2, arg_result, "Wrong address");
+ assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
+ free(ptr2);
+
+ /* Realloc with move, large. */
+ ptr = malloc(1);
+ reset();
+ ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
+ assert_ptr_ne(ptr, ptr2, "Large realloc didn't move");
+
+ assert_d_eq(call_count, 2, "Hook not called");
+ assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ assert_ptr_eq(ptr, arg_address, "Wrong address");
+ assert_ptr_eq(ptr2, arg_result, "Wrong address");
+ assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ "Wrong raw result");
+ assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ "Wrong argument");
+ free(ptr2);
+
+ hook_remove(TSDN_NULL, handle);
+}
+
+static void *
+realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
+ return realloc(ptr, size);
+}
+
+TEST_BEGIN(test_hooks_realloc) {
+ do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc,
+ hook_dalloc_realloc);
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_rallocx) {
+ do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
+ hook_dalloc_rallocx);
+}
+TEST_END
+
+int
+main(void) {
+ /* We assert on call counts. */
+ return test_no_reentrancy(
+ test_hooks_basic,
+ test_hooks_null,
+ test_hooks_remove,
+ test_hooks_alloc_simple,
+ test_hooks_dalloc_simple,
+ test_hooks_expand_simple,
+ test_hooks_realloc_as_malloc_or_free,
+ test_hooks_realloc,
+ test_hooks_rallocx);
+}
diff --git a/deps/jemalloc/test/unit/huge.c b/deps/jemalloc/test/unit/huge.c
new file mode 100644
index 0000000..ab72cf0
--- /dev/null
+++ b/deps/jemalloc/test/unit/huge.c
@@ -0,0 +1,108 @@
+#include "test/jemalloc_test.h"
+
+/* Threshold: 2 << 20 = 2097152. */
+const char *malloc_conf = "oversize_threshold:2097152";
+
+#define HUGE_SZ (2 << 20)
+#define SMALL_SZ (8)
+
+TEST_BEGIN(huge_bind_thread) {
+ unsigned arena1, arena2;
+ size_t sz = sizeof(unsigned);
+
+ /* Bind to a manual arena. */
+ assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ "Failed to create arena");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
+ sizeof(arena1)), 0, "Fail to bind thread");
+
+ void *ptr = mallocx(HUGE_SZ, 0);
+ assert_ptr_not_null(ptr, "Fail to allocate huge size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ assert_u_eq(arena1, arena2, "Wrong arena used after binding");
+ dallocx(ptr, 0);
+
+ /* Switch back to arena 0. */
+ test_skip_if(have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena));
+ arena2 = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
+ sizeof(arena2)), 0, "Fail to bind thread");
+ ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ assert_u_eq(arena2, 0, "Wrong arena used after binding");
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+
+ /* Then huge allocation should use the huge arena. */
+ ptr = mallocx(HUGE_SZ, 0);
+ assert_ptr_not_null(ptr, "Fail to allocate huge size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ assert_u_ne(arena2, 0, "Wrong arena used after binding");
+ assert_u_ne(arena1, arena2, "Wrong arena used after binding");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+TEST_BEGIN(huge_mallocx) {
+ unsigned arena1, arena2;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ "Failed to create arena");
+ void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
+ assert_ptr_not_null(huge, "Fail to allocate huge size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
+ sizeof(huge)), 0, "Unexpected mallctl() failure");
+ assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
+ dallocx(huge, MALLOCX_ARENA(arena1));
+
+ void *huge2 = mallocx(HUGE_SZ, 0);
+ assert_ptr_not_null(huge, "Fail to allocate huge size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
+ sizeof(huge2)), 0, "Unexpected mallctl() failure");
+ assert_u_ne(arena1, arena2,
+ "Huge allocation should not come from the manual arena.");
+ assert_u_ne(arena2, 0,
+ "Huge allocation should not come from the arena 0.");
+ dallocx(huge2, 0);
+}
+TEST_END
+
+TEST_BEGIN(huge_allocation) {
+ unsigned arena1, arena2;
+
+ void *ptr = mallocx(HUGE_SZ, 0);
+ assert_ptr_not_null(ptr, "Fail to allocate huge size");
+ size_t sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
+ dallocx(ptr, 0);
+
+ ptr = mallocx(HUGE_SZ >> 1, 0);
+ assert_ptr_not_null(ptr, "Fail to allocate half huge size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ assert_u_ne(arena1, arena2, "Wrong arena used for half huge");
+ dallocx(ptr, 0);
+
+ ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(ptr, "Fail to allocate small size");
+ assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ sizeof(ptr)), 0, "Unexpected mallctl() failure");
+ assert_u_ne(arena1, arena2,
+ "Huge and small should be from different arenas");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ huge_allocation,
+ huge_mallocx,
+ huge_bind_thread);
+}
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
new file mode 100644
index 0000000..57e3ad4
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk.c
@@ -0,0 +1,141 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/util.h"
+
+static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
+static large_dalloc_junk_t *large_dalloc_junk_orig;
+static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig;
+static void *watch_for_junking;
+static bool saw_junking;
+
+static void
+watch_junking(void *p) {
+ watch_for_junking = p;
+ saw_junking = false;
+}
+
+static void
+arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
+ size_t i;
+
+ arena_dalloc_junk_small_orig(ptr, bin_info);
+ for (i = 0; i < bin_info->reg_size; i++) {
+ assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, bin_info->reg_size);
+ }
+ if (ptr == watch_for_junking) {
+ saw_junking = true;
+ }
+}
+
+static void
+large_dalloc_junk_intercept(void *ptr, size_t usize) {
+ size_t i;
+
+ large_dalloc_junk_orig(ptr, usize);
+ for (i = 0; i < usize; i++) {
+ assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, usize);
+ }
+ if (ptr == watch_for_junking) {
+ saw_junking = true;
+ }
+}
+
+static void
+large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) {
+ large_dalloc_maybe_junk_orig(ptr, usize);
+ if (ptr == watch_for_junking) {
+ saw_junking = true;
+ }
+}
+
+static void
+test_junk(size_t sz_min, size_t sz_max) {
+ uint8_t *s;
+ size_t sz_prev, sz, i;
+
+ if (opt_junk_free) {
+ arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
+ arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
+ large_dalloc_junk_orig = large_dalloc_junk;
+ large_dalloc_junk = large_dalloc_junk_intercept;
+ large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
+ large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
+ }
+
+ sz_prev = 0;
+ s = (uint8_t *)mallocx(sz_min, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ assert_u_eq(s[0], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ assert_u_eq(s[sz_prev-1], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ if (opt_junk_alloc) {
+ assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
+ "Newly allocated byte %zu/%zu isn't "
+ "junk-filled", i, sz);
+ }
+ s[i] = 'a';
+ }
+
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ uint8_t *t;
+ watch_junking(s);
+ t = (uint8_t *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)t,
+ "Unexpected rallocx() failure");
+ assert_zu_ge(sallocx(t, 0), sz+1,
+ "Unexpectedly small rallocx() result");
+ if (!background_thread_enabled()) {
+ assert_ptr_ne(s, t,
+ "Unexpected in-place rallocx()");
+ assert_true(!opt_junk_free || saw_junking,
+ "Expected region of size %zu to be "
+ "junk-filled", sz);
+ }
+ s = t;
+ }
+ }
+
+ watch_junking(s);
+ dallocx(s, 0);
+ assert_true(!opt_junk_free || saw_junking,
+ "Expected region of size %zu to be junk-filled", sz);
+
+ if (opt_junk_free) {
+ arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
+ large_dalloc_junk = large_dalloc_junk_orig;
+ large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
+ }
+}
+
+TEST_BEGIN(test_junk_small) {
+ test_skip_if(!config_fill);
+ test_junk(1, SC_SMALL_MAXCLASS - 1);
+}
+TEST_END
+
+TEST_BEGIN(test_junk_large) {
+ test_skip_if(!config_fill);
+ test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_junk_small,
+ test_junk_large);
+}
diff --git a/deps/jemalloc/test/unit/junk.sh b/deps/jemalloc/test/unit/junk.sh
new file mode 100644
index 0000000..97cd8ca
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:true"
+fi
diff --git a/deps/jemalloc/test/unit/junk_alloc.c b/deps/jemalloc/test/unit/junk_alloc.c
new file mode 100644
index 0000000..a442a0c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_alloc.c
@@ -0,0 +1 @@
+#include "junk.c"
diff --git a/deps/jemalloc/test/unit/junk_alloc.sh b/deps/jemalloc/test/unit/junk_alloc.sh
new file mode 100644
index 0000000..e1008c2
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_alloc.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:alloc"
+fi
diff --git a/deps/jemalloc/test/unit/junk_free.c b/deps/jemalloc/test/unit/junk_free.c
new file mode 100644
index 0000000..a442a0c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_free.c
@@ -0,0 +1 @@
+#include "junk.c"
diff --git a/deps/jemalloc/test/unit/junk_free.sh b/deps/jemalloc/test/unit/junk_free.sh
new file mode 100644
index 0000000..402196c
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_free.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="abort:false,zero:false,junk:free"
+fi
diff --git a/deps/jemalloc/test/unit/log.c b/deps/jemalloc/test/unit/log.c
new file mode 100644
index 0000000..a52bd73
--- /dev/null
+++ b/deps/jemalloc/test/unit/log.c
@@ -0,0 +1,193 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/log.h"
+
+static void
+expect_no_logging(const char *names) {
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l2 = LOG_VAR_INIT("l2");
+ log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
+
+ strcpy(log_var_names, names);
+
+ int count = 0;
+
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l2)
+ count++;
+ log_do_end(log_l2)
+
+ log_do_begin(log_l2_a)
+ count++;
+ log_do_end(log_l2_a)
+ }
+ assert_d_eq(count, 0, "Disabled logging not ignored!");
+}
+
+TEST_BEGIN(test_log_disabled) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ expect_no_logging("");
+ expect_no_logging("abc");
+ expect_no_logging("a.b.c");
+ expect_no_logging("l12");
+ expect_no_logging("l123|a456|b789");
+ expect_no_logging("|||");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_direct) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
+ log_var_t log_l2 = LOG_VAR_INIT("l2");
+
+ int count;
+
+ count = 0;
+ strcpy(log_var_names, "l1");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+ }
+ assert_d_eq(count, 10, "Mis-logged!");
+
+ count = 0;
+ strcpy(log_var_names, "l1.a");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+ }
+ assert_d_eq(count, 10, "Mis-logged!");
+
+ count = 0;
+ strcpy(log_var_names, "l1.a|abc|l2|def");
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+
+ log_do_begin(log_l2)
+ count++;
+ log_do_end(log_l2)
+ }
+ assert_d_eq(count, 20, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_indirect) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ strcpy(log_var_names, "l0|l1|abc|l2.b|def");
+
+ /* On. */
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ /* Off. */
+ log_var_t log_l1a = LOG_VAR_INIT("l1a");
+ /* On. */
+ log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
+ /* Off. */
+ log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
+ /* On. */
+ log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a");
+ /* On. */
+ log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b");
+
+ /* 4 are on total, so should sum to 40. */
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l1a)
+ count++;
+ log_do_end(log_l1a)
+
+ log_do_begin(log_l1_a)
+ count++;
+ log_do_end(log_l1_a)
+
+ log_do_begin(log_l2_a)
+ count++;
+ log_do_end(log_l2_a)
+
+ log_do_begin(log_l2_b_a)
+ count++;
+ log_do_end(log_l2_b_a)
+
+ log_do_begin(log_l2_b_b)
+ count++;
+ log_do_end(log_l2_b_b)
+ }
+
+ assert_d_eq(count, 40, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_log_enabled_global) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
+ strcpy(log_var_names, "abc|.|def");
+
+ log_var_t log_l1 = LOG_VAR_INIT("l1");
+ log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a");
+
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(log_l1)
+ count++;
+ log_do_end(log_l1)
+
+ log_do_begin(log_l2_a_a)
+ count++;
+ log_do_end(log_l2_a_a)
+ }
+ assert_d_eq(count, 20, "Mis-logged!");
+}
+TEST_END
+
+TEST_BEGIN(test_logs_if_no_init) {
+ test_skip_if(!config_log);
+ atomic_store_b(&log_init_done, false, ATOMIC_RELAXED);
+
+ log_var_t l = LOG_VAR_INIT("definitely.not.enabled");
+
+ int count = 0;
+ for (int i = 0; i < 10; i++) {
+ log_do_begin(l)
+ count++;
+ log_do_end(l)
+ }
+ assert_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
+}
+TEST_END
+
+/*
+ * This really just checks to make sure that this usage compiles; we don't have
+ * any test code to run.
+ */
+TEST_BEGIN(test_log_only_format_string) {
+ if (false) {
+ LOG("log_str", "No arguments follow this format string.");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_log_disabled,
+ test_log_enabled_direct,
+ test_log_enabled_indirect,
+ test_log_enabled_global,
+ test_logs_if_no_init,
+ test_log_only_format_string);
+}
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
new file mode 100644
index 0000000..3a75ac0
--- /dev/null
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -0,0 +1,888 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/hook.h"
+#include "jemalloc/internal/util.h"
+
+TEST_BEGIN(test_mallctl_errors) {
+ uint64_t epoch;
+ size_t sz;
+
+ assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
+ "mallctl() should return ENOENT for non-existent names");
+
+ assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
+ EPERM, "mallctl() should return EPERM on attempt to write "
+ "read-only value");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctl() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctl() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_errors) {
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
+ "mallctlnametomib() should return ENOENT for non-existent names");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymib_errors) {
+ uint64_t epoch;
+ size_t sz;
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
+ strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
+ "attempt to write read-only value");
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_read_write) {
+ uint64_t old_epoch, new_epoch;
+ size_t sz = sizeof(old_epoch);
+
+ /* Blind. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read. */
+ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Write. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
+ sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read+write. */
+ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
+ (void *)&new_epoch, sizeof(new_epoch)), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_short_mib) {
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 3;
+ mib[3] = 42;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ assert_zu_eq(miblen, 3, "Unexpected mib output length");
+ assert_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_config) {
+#define TEST_MALLCTL_CONFIG(config, t) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_b_eq(oldval, config_##config, "Incorrect config value"); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_CONFIG(cache_oblivious, bool);
+ TEST_MALLCTL_CONFIG(debug, bool);
+ TEST_MALLCTL_CONFIG(fill, bool);
+ TEST_MALLCTL_CONFIG(lazy_lock, bool);
+ TEST_MALLCTL_CONFIG(malloc_conf, const char *);
+ TEST_MALLCTL_CONFIG(prof, bool);
+ TEST_MALLCTL_CONFIG(prof_libgcc, bool);
+ TEST_MALLCTL_CONFIG(prof_libunwind, bool);
+ TEST_MALLCTL_CONFIG(stats, bool);
+ TEST_MALLCTL_CONFIG(utrace, bool);
+ TEST_MALLCTL_CONFIG(xmalloc, bool);
+
+#undef TEST_MALLCTL_CONFIG
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_opt) {
+ bool config_always = true;
+
+#define TEST_MALLCTL_OPT(t, opt, config) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ int expected = config_##config ? 0 : ENOENT; \
+ int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
+ 0); \
+ assert_d_eq(result, expected, \
+ "Unexpected mallctl() result for opt."#opt); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_OPT(bool, abort, always);
+ TEST_MALLCTL_OPT(bool, abort_conf, always);
+ TEST_MALLCTL_OPT(bool, confirm_conf, always);
+ TEST_MALLCTL_OPT(const char *, metadata_thp, always);
+ TEST_MALLCTL_OPT(bool, retain, always);
+ TEST_MALLCTL_OPT(const char *, dss, always);
+ TEST_MALLCTL_OPT(unsigned, narenas, always);
+ TEST_MALLCTL_OPT(const char *, percpu_arena, always);
+ TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
+ TEST_MALLCTL_OPT(bool, background_thread, always);
+ TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
+ TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
+ TEST_MALLCTL_OPT(bool, stats_print, always);
+ TEST_MALLCTL_OPT(const char *, junk, fill);
+ TEST_MALLCTL_OPT(bool, zero, fill);
+ TEST_MALLCTL_OPT(bool, utrace, utrace);
+ TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
+ TEST_MALLCTL_OPT(bool, tcache, always);
+ TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
+ TEST_MALLCTL_OPT(size_t, lg_tcache_max, always);
+ TEST_MALLCTL_OPT(const char *, thp, always);
+ TEST_MALLCTL_OPT(bool, prof, prof);
+ TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
+ TEST_MALLCTL_OPT(bool, prof_active, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
+ TEST_MALLCTL_OPT(bool, prof_accum, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
+ TEST_MALLCTL_OPT(bool, prof_gdump, prof);
+ TEST_MALLCTL_OPT(bool, prof_final, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak, prof);
+
+#undef TEST_MALLCTL_OPT
+}
+TEST_END
+
+TEST_BEGIN(test_manpage_example) {
+ unsigned nbins, i;
+ size_t mib[4];
+ size_t len, miblen;
+
+ len = sizeof(nbins);
+ assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
+ NULL, 0), 0, "Unexpected mallctlbymib() failure");
+ /* Do something with bin_size... */
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_tcache_none) {
+ test_skip_if(!opt_tcache);
+
+ /* Allocate p and q. */
+ void *p0 = mallocx(42, 0);
+ assert_ptr_not_null(p0, "Unexpected mallocx() failure");
+ void *q = mallocx(42, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+
+ /* Deallocate p and q, but bypass the tcache for q. */
+ dallocx(p0, 0);
+ dallocx(q, MALLOCX_TCACHE_NONE);
+
+ /* Make sure that tcache-based allocation returns p, not q. */
+ void *p1 = mallocx(42, 0);
+ assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+ assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
+
+ /* Clean up. */
+ dallocx(p1, MALLOCX_TCACHE_NONE);
+}
+TEST_END
+
+TEST_BEGIN(test_tcache) {
+#define NTCACHES 10
+ unsigned tis[NTCACHES];
+ void *ps[NTCACHES];
+ void *qs[NTCACHES];
+ unsigned i;
+ size_t sz, psz, qsz;
+
+ psz = 42;
+ qsz = nallocx(psz, 0) + 1;
+
+ /* Create tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Exercise tcache ID recycling. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ (void *)&tis[i], sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Flush empty tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Cache some allocations. */
+ for (i = 0; i < NTCACHES; i++) {
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
+
+ qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Verify that tcaches allocate cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *p0 = ps[i];
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(ps[i], p0,
+ "Expected mallocx() to allocate cached region, i=%u", i);
+ }
+
+ /* Verify that reallocation uses cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *q0 = qs[i];
+ qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(qs[i], q0,
+ "Expected rallocx() to allocate cached region, i=%u", i);
+ /* Avoid undefined behavior in case of test failure. */
+ if (qs[i] == NULL) {
+ qs[i] = ps[i];
+ }
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Flush some non-empty tcaches. */
+ for (i = 0; i < NTCACHES/2; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Destroy tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ (void *)&tis[i], sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_thread_arena) {
+ unsigned old_arena_ind, new_arena_ind, narenas;
+
+ const char *opa;
+ size_t sz = sizeof(opa);
+ assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ if (opt_oversize_threshold != 0) {
+ narenas--;
+ }
+ assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
+
+ if (strcmp(opa, "disabled") == 0) {
+ new_arena_ind = narenas - 1;
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&new_arena_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure");
+ new_arena_ind = 0;
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&new_arena_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl() failure");
+ } else {
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
+ if (old_arena_ind != new_arena_ind) {
+ assert_d_eq(mallctl("thread.arena",
+ (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
+ sizeof(unsigned)), EPERM, "thread.arena ctl "
+ "should not be allowed with percpu arena");
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_initialized) {
+ unsigned narenas, i;
+ size_t sz;
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ bool initialized;
+
+ sz = sizeof(narenas);
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < narenas; i++) {
+ mib[1] = i;
+ sz = sizeof(initialized);
+ assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ }
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ sz = sizeof(initialized);
+ assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_true(initialized,
+ "Merged arena statistics should always be initialized");
+
+ /* Equivalent to the above but using mallctl() directly. */
+ sz = sizeof(initialized);
+ assert_d_eq(mallctl(
+ "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
+ (void *)&initialized, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_true(initialized,
+ "Merged arena statistics should always be initialized");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dirty_decay_ms) {
+ ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arena.0.dirty_decay_ms",
+ (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ dirty_decay_ms = -2;
+ assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ dirty_decay_ms = 0x7fffffff;
+ assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
+ dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
+ dirty_decay_ms++) {
+ ssize_t old_dirty_decay_ms;
+
+ assert_d_eq(mallctl("arena.0.dirty_decay_ms",
+ (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ "Unexpected old arena.0.dirty_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
+ ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ muzzy_decay_ms = -2;
+ assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ muzzy_decay_ms = 0x7fffffff;
+ assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+
+ for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
+ muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
+ muzzy_decay_ms++) {
+ ssize_t old_muzzy_decay_ms;
+
+ assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ "Unexpected old arena.0.muzzy_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_purge) {
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_decay) {
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+
+ mib[1] = MALLCTL_ARENAS_ALL;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dss) {
+ const char *dss_prec_old, *dss_prec_new;
+ size_t sz = sizeof(dss_prec_old);
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+ "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+
+ mib[1] = narenas_total_get();
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
+ "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_retain_grow_limit) {
+ size_t old_limit, new_limit, default_limit;
+ size_t mib[3];
+ size_t miblen;
+
+ bool retain_enabled;
+ size_t sz = sizeof(retain_enabled);
+ assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ test_skip_if(!retain_enabled);
+
+ sz = sizeof(default_limit);
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
+ 0, "Unexpected mallctlnametomib() error");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
+ "Unexpected default for retain_grow_limit");
+
+ new_limit = PAGE - 1;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
+
+ new_limit = PAGE + 1;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(old_limit, PAGE,
+ "Unexpected value for retain_grow_limit");
+
+ /* Expect grow less than psize class 10. */
+ new_limit = sz_pind2sz(10) - 1;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ sizeof(new_limit)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(old_limit, sz_pind2sz(9),
+ "Unexpected value for retain_grow_limit");
+
+ /* Restore to default. */
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
+ sizeof(default_limit)), 0, "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_dirty_decay_ms) {
+ ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arenas.dirty_decay_ms",
+ (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ dirty_decay_ms = -2;
+ assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ dirty_decay_ms = 0x7fffffff;
+ assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
+ "Expected mallctl() failure");
+
+ for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
+ dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
+ dirty_decay_ms++) {
+ ssize_t old_dirty_decay_ms;
+
+ assert_d_eq(mallctl("arenas.dirty_decay_ms",
+ (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ "Unexpected old arenas.dirty_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_muzzy_decay_ms) {
+ ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arenas.muzzy_decay_ms",
+ (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ muzzy_decay_ms = -2;
+ assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ muzzy_decay_ms = 0x7fffffff;
+ assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
+ "Expected mallctl() failure");
+
+ for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
+ muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
+ muzzy_decay_ms++) {
+ ssize_t old_muzzy_decay_ms;
+
+ assert_d_eq(mallctl("arenas.muzzy_decay_ms",
+ (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
+ sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ "Unexpected old arenas.muzzy_decay_ms");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_constants) {
+#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
+ TEST_ARENAS_CONSTANT(size_t, page, PAGE);
+ TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
+ TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
+
+#undef TEST_ARENAS_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_bin_constants) {
+#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
+ TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
+ bin_infos[0].slab_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
+
+#undef TEST_ARENAS_BIN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lextent_constants) {
+#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
+ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
+ SC_LARGE_MINCLASS);
+
+#undef TEST_ARENAS_LEXTENT_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_create) {
+ unsigned narenas_before, arena, narenas_after;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ assert_u_eq(narenas_before+1, narenas_after,
+ "Unexpected number of arenas before versus after extension");
+ assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lookup) {
+ unsigned arena, arena1;
+ void *ptr;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
+ assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ assert_u_eq(arena, arena1, "Unexpected arena index");
+ dallocx(ptr, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas) {
+#define TEST_STATS_ARENAS(t, name) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
+ NULL, 0), 0, "Unexpected mallctl() failure"); \
+} while (0)
+
+ TEST_STATS_ARENAS(unsigned, nthreads);
+ TEST_STATS_ARENAS(const char *, dss);
+ TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
+ TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
+ TEST_STATS_ARENAS(size_t, pactive);
+ TEST_STATS_ARENAS(size_t, pdirty);
+
+#undef TEST_STATS_ARENAS
+}
+TEST_END
+
+static void
+alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
+ UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
+ *(bool *)extra = true;
+}
+
+static void
+dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
+ UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
+ *(bool *)extra = true;
+}
+
+TEST_BEGIN(test_hooks) {
+ bool hook_called = false;
+ hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
+ void *handle = NULL;
+ size_t sz = sizeof(handle);
+ int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ assert_d_eq(err, 0, "Hook installation failed");
+ assert_ptr_ne(handle, NULL, "Hook installation gave null handle");
+ void *ptr = mallocx(1, 0);
+ assert_true(hook_called, "Alloc hook not called");
+ hook_called = false;
+ free(ptr);
+ assert_true(hook_called, "Free hook not called");
+
+ err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
+ sizeof(handle));
+ assert_d_eq(err, 0, "Hook removal failed");
+ hook_called = false;
+ ptr = mallocx(1, 0);
+ free(ptr);
+ assert_false(hook_called, "Hook called after removal");
+}
+TEST_END
+
+TEST_BEGIN(test_hooks_exhaustion) {
+ bool hook_called = false;
+ hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
+
+ void *handle;
+ void *handles[HOOK_MAX];
+ size_t sz = sizeof(handle);
+ int err;
+ for (int i = 0; i < HOOK_MAX; i++) {
+ handle = NULL;
+ err = mallctl("experimental.hooks.install", &handle, &sz,
+ &hooks, sizeof(hooks));
+ assert_d_eq(err, 0, "Error installation hooks");
+ assert_ptr_ne(handle, NULL, "Got NULL handle");
+ handles[i] = handle;
+ }
+ err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ assert_d_eq(err, EAGAIN, "Should have failed hook installation");
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.remove", NULL, NULL,
+ &handles[i], sizeof(handles[i]));
+ assert_d_eq(err, 0, "Hook removal failed");
+ }
+ /* Insertion failed, but then we removed some; it should work now. */
+ handle = NULL;
+ err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
+ sizeof(hooks));
+ assert_d_eq(err, 0, "Hook insertion failed");
+ assert_ptr_ne(handle, NULL, "Got NULL handle");
+ err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
+ sizeof(handle));
+ assert_d_eq(err, 0, "Hook removal failed");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mallctl_errors,
+ test_mallctlnametomib_errors,
+ test_mallctlbymib_errors,
+ test_mallctl_read_write,
+ test_mallctlnametomib_short_mib,
+ test_mallctl_config,
+ test_mallctl_opt,
+ test_manpage_example,
+ test_tcache_none,
+ test_tcache,
+ test_thread_arena,
+ test_arena_i_initialized,
+ test_arena_i_dirty_decay_ms,
+ test_arena_i_muzzy_decay_ms,
+ test_arena_i_purge,
+ test_arena_i_decay,
+ test_arena_i_dss,
+ test_arena_i_retain_grow_limit,
+ test_arenas_dirty_decay_ms,
+ test_arenas_muzzy_decay_ms,
+ test_arenas_constants,
+ test_arenas_bin_constants,
+ test_arenas_lextent_constants,
+ test_arenas_create,
+ test_arenas_lookup,
+ test_stats_arenas,
+ test_hooks,
+ test_hooks_exhaustion);
+}
diff --git a/deps/jemalloc/test/unit/malloc_io.c b/deps/jemalloc/test/unit/malloc_io.c
new file mode 100644
index 0000000..79ba7fc
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_io.c
@@ -0,0 +1,258 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
+ int err;
+
+ set_errno(0);
+ assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ err = get_errno();
+ assert_d_eq(err, 0, "Unexpected failure");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax) {
+ struct test_s {
+ const char *input;
+ const char *expected_remainder;
+ int base;
+ int expected_errno;
+ const char *expected_errno_name;
+ uintmax_t expected_x;
+ };
+#define ERR(e) e, #e
+#define KUMAX(x) ((uintmax_t)x##ULL)
+#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)
+ struct test_s tests[] = {
+ {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
+
+ {"", "", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {"+42", "", 0, ERR(0), KUMAX(42)},
+ {"-42", "", 0, ERR(0), KSMAX(-42)},
+ {"042", "", 0, ERR(0), KUMAX(042)},
+ {"+042", "", 0, ERR(0), KUMAX(042)},
+ {"-042", "", 0, ERR(0), KSMAX(-042)},
+ {"0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
+
+ {"0", "", 0, ERR(0), KUMAX(0)},
+ {"1", "", 0, ERR(0), KUMAX(1)},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {" 42", "", 0, ERR(0), KUMAX(42)},
+ {"42 ", " ", 0, ERR(0), KUMAX(42)},
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"42x", "x", 0, ERR(0), KUMAX(42)},
+
+ {"07", "", 0, ERR(0), KUMAX(7)},
+ {"010", "", 0, ERR(0), KUMAX(8)},
+ {"08", "8", 0, ERR(0), KUMAX(0)},
+ {"0_", "_", 0, ERR(0), KUMAX(0)},
+
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"0X", "X", 0, ERR(0), KUMAX(0)},
+ {"0xg", "xg", 0, ERR(0), KUMAX(0)},
+ {"0XA", "", 0, ERR(0), KUMAX(10)},
+
+ {"010", "", 10, ERR(0), KUMAX(10)},
+ {"0x3", "x3", 10, ERR(0), KUMAX(0)},
+
+ {"12", "2", 2, ERR(0), KUMAX(1)},
+ {"78", "8", 8, ERR(0), KUMAX(7)},
+ {"9a", "a", 10, ERR(0), KUMAX(9)},
+ {"9A", "A", 10, ERR(0), KUMAX(9)},
+ {"fg", "g", 16, ERR(0), KUMAX(15)},
+ {"FG", "G", 16, ERR(0), KUMAX(15)},
+ {"0xfg", "g", 16, ERR(0), KUMAX(15)},
+ {"0XFG", "G", 16, ERR(0), KUMAX(15)},
+ {"z_", "_", 36, ERR(0), KUMAX(35)},
+ {"Z_", "_", 36, ERR(0), KUMAX(35)}
+ };
+#undef ERR
+#undef KUMAX
+#undef KSMAX
+ unsigned i;
+
+ for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
+ struct test_s *test = &tests[i];
+ int err;
+ uintmax_t result;
+ char *remainder;
+
+ set_errno(0);
+ result = malloc_strtoumax(test->input, &remainder, test->base);
+ err = get_errno();
+ assert_d_eq(err, test->expected_errno,
+ "Expected errno %s for \"%s\", base %d",
+ test->expected_errno_name, test->input, test->base);
+ assert_str_eq(remainder, test->expected_remainder,
+ "Unexpected remainder for \"%s\", base %d",
+ test->input, test->base);
+ if (err == 0) {
+ assert_ju_eq(result, test->expected_x,
+ "Unexpected result for \"%s\", base %d",
+ test->input, test->base);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf_truncated) {
+#define BUFLEN 15
+ char buf[BUFLEN];
+ size_t result;
+ size_t len;
+#define TEST(expected_str_untruncated, ...) do { \
+ result = malloc_snprintf(buf, len, __VA_ARGS__); \
+ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
+ "Unexpected string inequality (\"%s\" vs \"%s\")", \
+ buf, expected_str_untruncated); \
+ assert_zu_eq(result, strlen(expected_str_untruncated), \
+ "Unexpected result"); \
+} while (0)
+
+ for (len = 1; len < BUFLEN; len++) {
+ TEST("012346789", "012346789");
+ TEST("a0123b", "a%sb", "0123");
+ TEST("a01234567", "a%s%s", "0123", "4567");
+ TEST("a0123 ", "a%-6s", "0123");
+ TEST("a 0123", "a%6s", "0123");
+ TEST("a 012", "a%6.3s", "0123");
+ TEST("a 012", "a%*.*s", 6, 3, "0123");
+ TEST("a 123b", "a% db", 123);
+ TEST("a123b", "a%-db", 123);
+ TEST("a-123b", "a%-db", -123);
+ TEST("a+123b", "a%+db", 123);
+ }
+#undef BUFLEN
+#undef TEST
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf) {
+#define BUFLEN 128
+ char buf[BUFLEN];
+ size_t result;
+#define TEST(expected_str, ...) do { \
+ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
+ assert_str_eq(buf, expected_str, "Unexpected output"); \
+ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
+} while (0)
+
+ TEST("hello", "hello");
+
+ TEST("50%, 100%", "50%%, %d%%", 100);
+
+ TEST("a0123b", "a%sb", "0123");
+
+ TEST("a 0123b", "a%5sb", "0123");
+ TEST("a 0123b", "a%*sb", 5, "0123");
+
+ TEST("a0123 b", "a%-5sb", "0123");
+ TEST("a0123b", "a%*sb", -1, "0123");
+ TEST("a0123 b", "a%*sb", -5, "0123");
+ TEST("a0123 b", "a%-*sb", -5, "0123");
+
+ TEST("a012b", "a%.3sb", "0123");
+ TEST("a012b", "a%.*sb", 3, "0123");
+ TEST("a0123b", "a%.*sb", -3, "0123");
+
+ TEST("a 012b", "a%5.3sb", "0123");
+ TEST("a 012b", "a%5.*sb", 3, "0123");
+ TEST("a 012b", "a%*.3sb", 5, "0123");
+ TEST("a 012b", "a%*.*sb", 5, 3, "0123");
+ TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
+
+ TEST("_abcd_", "_%x_", 0xabcd);
+ TEST("_0xabcd_", "_%#x_", 0xabcd);
+ TEST("_1234_", "_%o_", 01234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_ 1234_", "_% d_", 1234);
+ TEST("_+1234_", "_%+d_", 1234);
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_-1234_", "_% d_", -1234);
+ TEST("_-1234_", "_%+d_", -1234);
+
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_-1234_", "_%i_", -1234);
+ TEST("_1234_", "_%i_", 1234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+ TEST("_0x1234abc_", "_%#x_", 0x1234abc);
+ TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
+ TEST("_c_", "_%c_", 'c');
+ TEST("_string_", "_%s_", "string");
+ TEST("_0x42_", "_%p_", ((void *)0x42));
+
+ TEST("_-1234_", "_%ld_", ((long)-1234));
+ TEST("_1234_", "_%ld_", ((long)1234));
+ TEST("_-1234_", "_%li_", ((long)-1234));
+ TEST("_1234_", "_%li_", ((long)1234));
+ TEST("_01234_", "_%#lo_", ((long)01234));
+ TEST("_1234_", "_%lu_", ((long)1234));
+ TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
+
+ TEST("_-1234_", "_%lld_", ((long long)-1234));
+ TEST("_1234_", "_%lld_", ((long long)1234));
+ TEST("_-1234_", "_%lli_", ((long long)-1234));
+ TEST("_1234_", "_%lli_", ((long long)1234));
+ TEST("_01234_", "_%#llo_", ((long long)01234));
+ TEST("_1234_", "_%llu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%qd_", ((long long)-1234));
+ TEST("_1234_", "_%qd_", ((long long)1234));
+ TEST("_-1234_", "_%qi_", ((long long)-1234));
+ TEST("_1234_", "_%qi_", ((long long)1234));
+ TEST("_01234_", "_%#qo_", ((long long)01234));
+ TEST("_1234_", "_%qu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
+ TEST("_1234_", "_%jd_", ((intmax_t)1234));
+ TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
+ TEST("_1234_", "_%ji_", ((intmax_t)1234));
+ TEST("_01234_", "_%#jo_", ((intmax_t)01234));
+ TEST("_1234_", "_%ju_", ((intmax_t)1234));
+ TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
+
+ TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
+ TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
+
+ TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zd_", ((ssize_t)1234));
+ TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zi_", ((ssize_t)1234));
+ TEST("_01234_", "_%#zo_", ((ssize_t)01234));
+ TEST("_1234_", "_%zu_", ((ssize_t)1234));
+ TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
+#undef BUFLEN
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_malloc_strtoumax_no_endptr,
+ test_malloc_strtoumax,
+ test_malloc_snprintf_truncated,
+ test_malloc_snprintf);
+}
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
new file mode 100644
index 0000000..09ef20c
--- /dev/null
+++ b/deps/jemalloc/test/unit/math.c
@@ -0,0 +1,390 @@
+#include "test/jemalloc_test.h"
+
+#define MAX_REL_ERR 1.0e-9
+#define MAX_ABS_ERR 1.0e-9
+
+#include <float.h>
+
+#ifdef __PGI
+#undef INFINITY
+#endif
+
+#ifndef INFINITY
+#define INFINITY (DBL_MAX + DBL_MAX)
+#endif
+
+static bool
+double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
+ double rel_err;
+
+ if (fabs(a - b) < max_abs_err) {
+ return true;
+ }
+ rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
+ return (rel_err < max_rel_err);
+}
+
+static uint64_t
+factorial(unsigned x) {
+ uint64_t ret = 1;
+ unsigned i;
+
+ for (i = 2; i <= x; i++) {
+ ret *= (uint64_t)i;
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_ln_gamma_factorial) {
+ unsigned x;
+
+ /* exp(ln_gamma(x)) == (x-1)! for integer x. */
+ for (x = 1; x <= 21; x++) {
+ assert_true(double_eq_rel(exp(ln_gamma(x)),
+ (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect factorial result for x=%u", x);
+ }
+}
+TEST_END
+
+/* Expected ln_gamma([0.0..100.0] increment=0.25). */
+static const double ln_gamma_misc_expected[] = {
+ INFINITY,
+ 1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
+ 0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
+ -0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
+ 0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
+ 0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
+ 1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
+ 2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
+ 3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
+ 5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
+ 6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
+ 8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
+ 9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
+ 11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
+ 12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
+ 14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
+ 16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
+ 18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
+ 19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
+ 21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
+ 23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
+ 25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
+ 27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
+ 29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
+ 32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
+ 34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
+ 36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
+ 38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
+ 40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
+ 43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
+ 45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
+ 47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
+ 50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
+ 52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
+ 54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
+ 57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
+ 59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
+ 62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
+ 64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
+ 67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
+ 69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
+ 72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
+ 74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
+ 77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
+ 79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
+ 82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
+ 85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
+ 87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
+ 90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
+ 93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
+ 95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
+ 98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
+ 101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
+ 103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
+ 106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
+ 109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
+ 112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
+ 114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
+ 117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
+ 120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
+ 123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
+ 126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
+ 129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
+ 131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
+ 134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
+ 137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
+ 140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
+ 143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
+ 146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
+ 149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
+ 152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
+ 155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
+ 158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
+ 161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
+ 164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
+ 167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
+ 170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
+ 173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
+ 176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
+ 179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
+ 182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
+ 185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
+ 188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
+ 191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
+ 194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
+ 197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
+ 201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
+ 204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
+ 207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
+ 210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
+ 213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
+ 216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
+ 219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
+ 223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
+ 226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
+ 229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
+ 232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
+ 235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
+ 238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
+ 242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
+ 245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
+ 248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
+ 251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
+ 255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
+ 258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
+ 261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
+ 264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
+ 268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
+ 271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
+ 274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
+ 278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
+ 281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
+ 284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
+ 287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
+ 291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
+ 294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
+ 297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
+ 301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
+ 304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
+ 308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
+ 311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
+ 314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
+ 318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
+ 321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
+ 324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
+ 328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
+ 331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
+ 335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
+ 338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
+ 341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
+ 345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
+ 348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
+ 352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
+ 355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
+ 359.13420536957539753
+};
+
+TEST_BEGIN(test_ln_gamma_misc) {
+ unsigned i;
+
+ for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
+ double x = (double)i * 0.25;
+ assert_true(double_eq_rel(ln_gamma(x),
+ ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect ln_gamma result for i=%u", i);
+ }
+}
+TEST_END
+
+/* Expected pt_norm([0.01..0.99] increment=0.01). */
+static const double pt_norm_expected[] = {
+ -INFINITY,
+ -2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
+ -1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
+ -1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
+ -1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
+ -1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
+ -0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
+ -0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
+ -0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
+ -0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
+ -0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
+ -0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
+ -0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
+ -0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
+ -0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
+ -0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
+ -0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
+ -0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
+ 0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
+ 0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
+ 0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
+ 0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
+ 0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
+ 0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
+ 0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
+ 0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
+ 0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
+ 0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
+ 0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
+ 1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
+ 1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
+ 1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
+ 1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
+ 1.88079360815125041, 2.05374891063182208, 2.32634787404084076
+};
+
+TEST_BEGIN(test_pt_norm) {
+ unsigned i;
+
+ for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
+ double p = (double)i * 0.01;
+ assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
+ MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_norm result for i=%u", i);
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
+ * df={0.1, 1.1, 10.1, 100.1, 1000.1}).
+ */
+static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
+static const double pt_chi2_expected[] = {
+ 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
+ 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
+ 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
+ 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
+ 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
+
+ 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
+ 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
+ 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
+ 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
+ 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
+
+ 2.606673548632508, 4.602913725294877, 5.646152813924212,
+ 6.488971315540869, 7.249823275816285, 7.977314231410841,
+ 8.700354939944047, 9.441728024225892, 10.224338321374127,
+ 11.076435368801061, 12.039320937038386, 13.183878752697167,
+ 14.657791935084575, 16.885728216339373, 23.361991680031817,
+
+ 70.14844087392152, 80.92379498849355, 85.53325420085891,
+ 88.94433120715347, 91.83732712857017, 94.46719943606301,
+ 96.96896479994635, 99.43412843510363, 101.94074719829733,
+ 104.57228644307247, 107.43900093448734, 110.71844673417287,
+ 114.76616819871325, 120.57422505959563, 135.92318818757556,
+
+ 899.0072447849649, 937.9271278858220, 953.8117189560207,
+ 965.3079371501154, 974.8974061207954, 983.4936235182347,
+ 991.5691170518946, 999.4334123954690, 1007.3391826856553,
+ 1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
+ 1046.4872561869577, 1063.5717461999654, 1107.0741966053859
+};
+
+TEST_BEGIN(test_pt_chi2) {
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
+ double df = pt_chi2_df[i];
+ double ln_gamma_df = ln_gamma(df * 0.5);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
+ pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_gamma(p=[0.1..0.99] increment=0.07,
+ * shape=[0.5..3.0] increment=0.5).
+ */
+static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
+static const double pt_gamma_expected[] = {
+ 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
+ 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
+ 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
+ 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
+ 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
+
+ 0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
+ 0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
+ 0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
+ 1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
+ 1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
+
+ 0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
+ 0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
+ 1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
+ 1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
+ 2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
+
+ 0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
+ 0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
+ 1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
+ 2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
+ 3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
+
+ 0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
+ 1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
+ 1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
+ 2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
+ 4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
+
+ 0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
+ 1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
+ 2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
+ 3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
+ 4.7230515633946677, 5.6417477865306020, 8.4059469148854635
+};
+
+TEST_BEGIN(test_pt_gamma_shape) {
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
+ double shape = pt_gamma_shape[i];
+ double ln_gamma_shape = ln_gamma(shape);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
+ ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Incorrect pt_gamma result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_pt_gamma_scale) {
+ double shape = 1.0;
+ double ln_gamma_shape = ln_gamma(shape);
+
+ assert_true(double_eq_rel(
+ pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
+ pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Scale should be trivially equivalent to external multiplication");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ln_gamma_factorial,
+ test_ln_gamma_misc,
+ test_pt_norm,
+ test_pt_chi2,
+ test_pt_gamma_shape,
+ test_pt_gamma_scale);
+}
diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c
new file mode 100644
index 0000000..57a4d54
--- /dev/null
+++ b/deps/jemalloc/test/unit/mq.c
@@ -0,0 +1,89 @@
+#include "test/jemalloc_test.h"
+
+#define NSENDERS 3
+#define NMSGS 100000
+
+typedef struct mq_msg_s mq_msg_t;
+struct mq_msg_s {
+ mq_msg(mq_msg_t) link;
+};
+mq_gen(static, mq_, mq_t, mq_msg_t, link)
+
+TEST_BEGIN(test_mq_basic) {
+ mq_t mq;
+ mq_msg_t msg;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+ assert_u_eq(mq_count(&mq), 0, "mq should be empty");
+ assert_ptr_null(mq_tryget(&mq),
+ "mq_tryget() should fail when the queue is empty");
+
+ mq_put(&mq, &msg);
+ assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+
+ mq_put(&mq, &msg);
+ assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+
+ mq_fini(&mq);
+}
+TEST_END
+
+static void *
+thd_receiver_start(void *arg) {
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < (NSENDERS * NMSGS); i++) {
+ mq_msg_t *msg = mq_get(mq);
+ assert_ptr_not_null(msg, "mq_get() should never return NULL");
+ dallocx(msg, 0);
+ }
+ return NULL;
+}
+
+static void *
+thd_sender_start(void *arg) {
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < NMSGS; i++) {
+ mq_msg_t *msg;
+ void *p;
+ p = mallocx(sizeof(mq_msg_t), 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ msg = (mq_msg_t *)p;
+ mq_put(mq, msg);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_mq_threaded) {
+ mq_t mq;
+ thd_t receiver;
+ thd_t senders[NSENDERS];
+ unsigned i;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+
+ thd_create(&receiver, thd_receiver_start, (void *)&mq);
+ for (i = 0; i < NSENDERS; i++) {
+ thd_create(&senders[i], thd_sender_start, (void *)&mq);
+ }
+
+ thd_join(receiver, NULL);
+ for (i = 0; i < NSENDERS; i++) {
+ thd_join(senders[i], NULL);
+ }
+
+ mq_fini(&mq);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mq_basic,
+ test_mq_threaded);
+}
+
diff --git a/deps/jemalloc/test/unit/mtx.c b/deps/jemalloc/test/unit/mtx.c
new file mode 100644
index 0000000..424587b
--- /dev/null
+++ b/deps/jemalloc/test/unit/mtx.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 2
+#define NINCRS 2000000
+
+TEST_BEGIN(test_mtx_basic) {
+ mtx_t mtx;
+
+ assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ mtx_lock(&mtx);
+ mtx_unlock(&mtx);
+ mtx_fini(&mtx);
+}
+TEST_END
+
+typedef struct {
+ mtx_t mtx;
+ unsigned x;
+} thd_start_arg_t;
+
+static void *
+thd_start(void *varg) {
+ thd_start_arg_t *arg = (thd_start_arg_t *)varg;
+ unsigned i;
+
+ for (i = 0; i < NINCRS; i++) {
+ mtx_lock(&arg->mtx);
+ arg->x++;
+ mtx_unlock(&arg->mtx);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_mtx_race) {
+ thd_start_arg_t arg;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ arg.x = 0;
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start, (void *)&arg);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+ assert_u_eq(arg.x, NTHREADS * NINCRS,
+ "Race-related counter corruption");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mtx_basic,
+ test_mtx_race);
+}
diff --git a/deps/jemalloc/test/unit/nstime.c b/deps/jemalloc/test/unit/nstime.c
new file mode 100644
index 0000000..f313780
--- /dev/null
+++ b/deps/jemalloc/test/unit/nstime.c
@@ -0,0 +1,249 @@
+#include "test/jemalloc_test.h"
+
+#define BILLION UINT64_C(1000000000)
+
+TEST_BEGIN(test_nstime_init) {
+ nstime_t nst;
+
+ nstime_init(&nst, 42000000043);
+ assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
+ assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_init2) {
+ nstime_t nst;
+
+ nstime_init2(&nst, 42, 43);
+ assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_copy) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init(&nstb, 0);
+ nstime_copy(&nstb, &nsta);
+ assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
+ assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_compare) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
+
+ nstime_init2(&nstb, 42, 42);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 42, 44);
+ assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+
+ nstime_init2(&nstb, 41, BILLION - 1);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ "nsta should be greater than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ "nstb should be less than nsta");
+
+ nstime_init2(&nstb, 43, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ "nsta should be less than nstb");
+ assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ "nstb should be greater than nsta");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_add) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 84, 86);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+
+ nstime_init2(&nsta, 42, BILLION - 1);
+ nstime_copy(&nstb, &nsta);
+ nstime_add(&nsta, &nstb);
+ nstime_init2(&nstb, 85, BILLION - 2);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_iadd) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, BILLION - 1);
+ nstime_iadd(&nsta, 1);
+ nstime_init2(&nstb, 43, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+
+ nstime_init2(&nsta, 42, 1);
+ nstime_iadd(&nsta, BILLION + 1);
+ nstime_init2(&nstb, 43, 2);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect addition result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_subtract) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init(&nstb, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_init2(&nstb, 41, 44);
+ nstime_subtract(&nsta, &nstb);
+ nstime_init2(&nstb, 0, BILLION - 1);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_isubtract) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_isubtract(&nsta, 42*BILLION + 43);
+ nstime_init(&nstb, 0);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_isubtract(&nsta, 41*BILLION + 44);
+ nstime_init2(&nstb, 0, BILLION - 1);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect subtraction result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_imultiply) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_imultiply(&nsta, 10);
+ nstime_init2(&nstb, 420, 430);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_imultiply(&nsta, 3);
+ nstime_init2(&nstb, 127, 999999998);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect multiplication result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_idivide) {
+ nstime_t nsta, nstb;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_idivide(&nsta, 10);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 666666666);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 3);
+ nstime_idivide(&nsta, 3);
+ assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_divide) {
+ nstime_t nsta, nstb, nstc;
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_add(&nsta, &nstc);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ "Incorrect division result");
+
+ nstime_init2(&nsta, 42, 43);
+ nstime_copy(&nstb, &nsta);
+ nstime_imultiply(&nsta, 10);
+ nstime_init(&nstc, 1);
+ nstime_subtract(&nsta, &nstc);
+ assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
+ "Incorrect division result");
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_monotonic) {
+ nstime_monotonic();
+}
+TEST_END
+
+TEST_BEGIN(test_nstime_update) {
+ nstime_t nst;
+
+ nstime_init(&nst, 0);
+
+ assert_false(nstime_update(&nst), "Basic time update failed.");
+
+ /* Only Rip Van Winkle sleeps this long. */
+ {
+ nstime_t addend;
+ nstime_init2(&addend, 631152000, 0);
+ nstime_add(&nst, &addend);
+ }
+ {
+ nstime_t nst0;
+ nstime_copy(&nst0, &nst);
+ assert_true(nstime_update(&nst),
+ "Update should detect time roll-back.");
+ assert_d_eq(nstime_compare(&nst, &nst0), 0,
+ "Time should not have been modified");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_nstime_init,
+ test_nstime_init2,
+ test_nstime_copy,
+ test_nstime_compare,
+ test_nstime_add,
+ test_nstime_iadd,
+ test_nstime_subtract,
+ test_nstime_isubtract,
+ test_nstime_imultiply,
+ test_nstime_idivide,
+ test_nstime_divide,
+ test_nstime_monotonic,
+ test_nstime_update);
+}
diff --git a/deps/jemalloc/test/unit/pack.c b/deps/jemalloc/test/unit/pack.c
new file mode 100644
index 0000000..fc188b0
--- /dev/null
+++ b/deps/jemalloc/test/unit/pack.c
@@ -0,0 +1,166 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Size class that is a divisor of the page size, ideally 4+ regions per run.
+ */
+#if LG_PAGE <= 14
+#define SZ (ZU(1) << (LG_PAGE - 2))
+#else
+#define SZ ZU(4096)
+#endif
+
+/*
+ * Number of slabs to consume at high water mark. Should be at least 2 so that
+ * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
+ * tested.
+ */
+#define NSLABS 8
+
+static unsigned
+binind_compute(void) {
+ size_t sz;
+ unsigned nbins, i;
+
+ sz = sizeof(nbins);
+ assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ for (i = 0; i < nbins; i++) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+ size_t size;
+
+ assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
+ &miblen), 0, "Unexpected mallctlnametomb failure");
+ mib[2] = (size_t)i;
+
+ sz = sizeof(size);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
+ 0), 0, "Unexpected mallctlbymib failure");
+ if (size == SZ) {
+ return i;
+ }
+ }
+
+ test_fail("Unable to compute nregs_per_run");
+ return 0;
+}
+
+static size_t
+nregs_per_run_compute(void) {
+ uint32_t nregs;
+ size_t sz;
+ unsigned binind = binind_compute();
+ size_t mib[4];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomb failure");
+ mib[2] = (size_t)binind;
+ sz = sizeof(nregs);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
+ 0), 0, "Unexpected mallctlbymib failure");
+ return nregs;
+}
+
+static unsigned
+arenas_create_mallctl(void) {
+ unsigned arena_ind;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Error in arenas.create");
+
+ return arena_ind;
+}
+
+static void
+arena_reset_mallctl(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib)/sizeof(size_t);
+
+ assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+TEST_BEGIN(test_pack) {
+ bool prof_enabled;
+ size_t sz = sizeof(prof_enabled);
+ if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
+ test_skip_if(prof_enabled);
+ }
+
+ unsigned arena_ind = arenas_create_mallctl();
+ size_t nregs_per_run = nregs_per_run_compute();
+ size_t nregs = nregs_per_run * NSLABS;
+ VARIABLE_ARRAY(void *, ptrs, nregs);
+ size_t i, j, offset;
+
+ /* Fill matrix. */
+ for (i = offset = 0; i < NSLABS; i++) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
+ " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
+ SZ, arena_ind, i, j);
+ ptrs[(i * nregs_per_run) + j] = p;
+ }
+ }
+
+ /*
+ * Free all but one region of each run, but rotate which region is
+ * preserved, so that subsequent allocations exercise the within-run
+ * layout policy.
+ */
+ offset = 0;
+ for (i = offset = 0;
+ i < NSLABS;
+ i++, offset = (offset + 1) % nregs_per_run) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p = ptrs[(i * nregs_per_run) + j];
+ if (offset == j) {
+ continue;
+ }
+ dallocx(p, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ }
+ }
+
+ /*
+ * Logically refill matrix, skipping preserved regions and verifying
+ * that the matrix is unmodified.
+ */
+ offset = 0;
+ for (i = offset = 0;
+ i < NSLABS;
+ i++, offset = (offset + 1) % nregs_per_run) {
+ for (j = 0; j < nregs_per_run; j++) {
+ void *p;
+
+ if (offset == j) {
+ continue;
+ }
+ p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE);
+ assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
+ "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
+ i, j);
+ }
+ }
+
+ /* Clean up. */
+ arena_reset_mallctl(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_pack);
+}
diff --git a/deps/jemalloc/test/unit/pack.sh b/deps/jemalloc/test/unit/pack.sh
new file mode 100644
index 0000000..6f45148
--- /dev/null
+++ b/deps/jemalloc/test/unit/pack.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Immediately purge to minimize fragmentation.
+export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0"
diff --git a/deps/jemalloc/test/unit/pages.c b/deps/jemalloc/test/unit/pages.c
new file mode 100644
index 0000000..ee729ee
--- /dev/null
+++ b/deps/jemalloc/test/unit/pages.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_pages_huge) {
+ size_t alloc_size;
+ bool commit;
+ void *pages, *hugepage;
+
+ alloc_size = HUGEPAGE * 2 - PAGE;
+ commit = true;
+ pages = pages_map(NULL, alloc_size, PAGE, &commit);
+ assert_ptr_not_null(pages, "Unexpected pages_map() error");
+
+ if (init_system_thp_mode == thp_mode_default) {
+ hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
+ assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
+ "Unexpected pages_huge() result");
+ assert_false(pages_nohuge(hugepage, HUGEPAGE),
+ "Unexpected pages_nohuge() result");
+ }
+
+ pages_unmap(pages, alloc_size);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_pages_huge);
+}
diff --git a/deps/jemalloc/test/unit/ph.c b/deps/jemalloc/test/unit/ph.c
new file mode 100644
index 0000000..88bf56f
--- /dev/null
+++ b/deps/jemalloc/test/unit/ph.c
@@ -0,0 +1,318 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ph.h"
+
+typedef struct node_s node_t;
+
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ phn(node_t) link;
+ uint64_t key;
+};
+
+static int
+node_cmp(const node_t *a, const node_t *b) {
+ int ret;
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the heap, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return ret;
+}
+
+static int
+node_cmp_magic(const node_t *a, const node_t *b) {
+
+ assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ return node_cmp(a, b);
+}
+
+typedef ph(node_t) heap_t;
+ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
+
+static void
+node_print(const node_t *node, unsigned depth) {
+ unsigned i;
+ node_t *leftmost_child, *sibling;
+
+ for (i = 0; i < depth; i++) {
+ malloc_printf("\t");
+ }
+ malloc_printf("%2"FMTu64"\n", node->key);
+
+ leftmost_child = phn_lchild_get(node_t, link, node);
+ if (leftmost_child == NULL) {
+ return;
+ }
+ node_print(leftmost_child, depth + 1);
+
+ for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
+ NULL; sibling = phn_next_get(node_t, link, sibling)) {
+ node_print(sibling, depth + 1);
+ }
+}
+
+static void
+heap_print(const heap_t *heap) {
+ node_t *auxelm;
+
+ malloc_printf("vvv heap %p vvv\n", heap);
+ if (heap->ph_root == NULL) {
+ goto label_return;
+ }
+
+ node_print(heap->ph_root, 0);
+
+ for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
+ auxelm = phn_next_get(node_t, link, auxelm)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ node_print(auxelm, 0);
+ }
+
+label_return:
+ malloc_printf("^^^ heap %p ^^^\n", heap);
+}
+
+static unsigned
+node_validate(const node_t *node, const node_t *parent) {
+ unsigned nnodes = 1;
+ node_t *leftmost_child, *sibling;
+
+ if (parent != NULL) {
+ assert_d_ge(node_cmp_magic(node, parent), 0,
+ "Child is less than parent");
+ }
+
+ leftmost_child = phn_lchild_get(node_t, link, node);
+ if (leftmost_child == NULL) {
+ return nnodes;
+ }
+ assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
+ (void *)node, "Leftmost child does not link to node");
+ nnodes += node_validate(leftmost_child, node);
+
+ for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
+ NULL; sibling = phn_next_get(node_t, link, sibling)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, sibling)), sibling,
+ "sibling's prev doesn't link to sibling");
+ nnodes += node_validate(sibling, node);
+ }
+ return nnodes;
+}
+
+static unsigned
+heap_validate(const heap_t *heap) {
+ unsigned nnodes = 0;
+ node_t *auxelm;
+
+ if (heap->ph_root == NULL) {
+ goto label_return;
+ }
+
+ nnodes += node_validate(heap->ph_root, NULL);
+
+ for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
+ auxelm = phn_next_get(node_t, link, auxelm)) {
+ assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
+ link, auxelm)), auxelm,
+ "auxelm's prev doesn't link to auxelm");
+ nnodes += node_validate(auxelm, NULL);
+ }
+
+label_return:
+ if (false) {
+ heap_print(heap);
+ }
+ return nnodes;
+}
+
+TEST_BEGIN(test_ph_empty) {
+ heap_t heap;
+
+ heap_new(&heap);
+ assert_true(heap_empty(&heap), "Heap should be empty");
+ assert_ptr_null(heap_first(&heap), "Unexpected node");
+ assert_ptr_null(heap_any(&heap), "Unexpected node");
+}
+TEST_END
+
+static void
+node_remove(heap_t *heap, node_t *node) {
+ heap_remove(heap, node);
+
+ node->magic = 0;
+}
+
+static node_t *
+node_remove_first(heap_t *heap) {
+ node_t *node = heap_remove_first(heap);
+ node->magic = 0;
+ return node;
+}
+
+static node_t *
+node_remove_any(heap_t *heap) {
+ node_t *node = heap_remove_any(heap);
+ node->magic = 0;
+ return node;
+}
+
+TEST_BEGIN(test_ph_random) {
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ heap_t heap;
+ node_t nodes[NNODES];
+ unsigned i, j, k;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = j;
+ }
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = NNODES - j - 1;
+ }
+ break;
+ default:
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize heap and nodes. */
+ heap_new(&heap);
+ assert_u_eq(heap_validate(&heap), 0,
+ "Incorrect node count");
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ heap_insert(&heap, &nodes[k]);
+ if (i % 13 == 12) {
+ assert_ptr_not_null(heap_any(&heap),
+ "Heap should not be empty");
+ /* Trigger merging. */
+ assert_ptr_not_null(heap_first(&heap),
+ "Heap should not be empty");
+ }
+ assert_u_eq(heap_validate(&heap), k + 1,
+ "Incorrect node count");
+ }
+
+ assert_false(heap_empty(&heap),
+ "Heap should not be empty");
+
+ /* Remove nodes. */
+ switch (i % 6) {
+ case 0:
+ for (k = 0; k < j; k++) {
+ assert_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ node_remove(&heap, &nodes[k]);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ case 1:
+ for (k = j; k > 0; k--) {
+ node_remove(&heap, &nodes[k-1]);
+ assert_u_eq(heap_validate(&heap), k - 1,
+ "Incorrect node count");
+ }
+ break;
+ case 2: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = node_remove_first(&heap);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ if (prev != NULL) {
+ assert_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ prev = node;
+ }
+ break;
+ } case 3: {
+ node_t *prev = NULL;
+ for (k = 0; k < j; k++) {
+ node_t *node = heap_first(&heap);
+ assert_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ if (prev != NULL) {
+ assert_d_ge(node_cmp(node,
+ prev), 0,
+ "Bad removal order");
+ }
+ node_remove(&heap, node);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ prev = node;
+ }
+ break;
+ } case 4: {
+ for (k = 0; k < j; k++) {
+ node_remove_any(&heap);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ } case 5: {
+ for (k = 0; k < j; k++) {
+ node_t *node = heap_any(&heap);
+ assert_u_eq(heap_validate(&heap), j - k,
+ "Incorrect node count");
+ node_remove(&heap, node);
+ assert_u_eq(heap_validate(&heap), j - k
+ - 1, "Incorrect node count");
+ }
+ break;
+ } default:
+ not_reached();
+ }
+
+ assert_ptr_null(heap_first(&heap),
+ "Heap should be empty");
+ assert_ptr_null(heap_any(&heap),
+ "Heap should be empty");
+ assert_true(heap_empty(&heap), "Heap should be empty");
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef SEED
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ph_empty,
+ test_ph_random);
+}
diff --git a/deps/jemalloc/test/unit/prng.c b/deps/jemalloc/test/unit/prng.c
new file mode 100644
index 0000000..b5795c2
--- /dev/null
+++ b/deps/jemalloc/test/unit/prng.c
@@ -0,0 +1,237 @@
+#include "test/jemalloc_test.h"
+
+static void
+test_prng_lg_range_u32(bool atomic) {
+ atomic_u32_t sa, sb;
+ uint32_t ra, rb;
+ unsigned lg_range;
+
+ atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_u32(&sa, 32, atomic);
+ assert_u32_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_u32(&sb, 32, atomic);
+ assert_u32_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ rb = prng_lg_range_u32(&sa, 32, atomic);
+ assert_u32_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_u32(&sa, 32, atomic);
+ for (lg_range = 31; lg_range > 0; lg_range--) {
+ atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_u32(&sb, lg_range, atomic);
+ assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_u32_eq(rb, (ra >> (32 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+}
+
+static void
+test_prng_lg_range_u64(void) {
+ uint64_t sa, sb, ra, rb;
+ unsigned lg_range;
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ sa = 42;
+ rb = prng_lg_range_u64(&sa, 64);
+ assert_u64_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, 64);
+ assert_u64_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ rb = prng_lg_range_u64(&sa, 64);
+ assert_u64_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ sa = 42;
+ ra = prng_lg_range_u64(&sa, 64);
+ for (lg_range = 63; lg_range > 0; lg_range--) {
+ sb = 42;
+ rb = prng_lg_range_u64(&sb, lg_range);
+ assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_u64_eq(rb, (ra >> (64 - lg_range)),
+ "Expected high order bits of full-width result, "
+ "lg_range=%u", lg_range);
+ }
+}
+
+static void
+test_prng_lg_range_zu(bool atomic) {
+ atomic_zu_t sa, sb;
+ size_t ra, rb;
+ unsigned lg_range;
+
+ atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_eq(ra, rb,
+ "Repeated generation should produce repeated results");
+
+ atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_eq(ra, rb,
+ "Equivalent generation should produce equivalent results");
+
+ atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ assert_zu_ne(ra, rb,
+ "Full-width results must not immediately repeat");
+
+ atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
+ lg_range--) {
+ atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
+ rb = prng_lg_range_zu(&sb, lg_range, atomic);
+ assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
+ 0, "High order bits should be 0, lg_range=%u", lg_range);
+ assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
+ lg_range)), "Expected high order bits of full-width "
+ "result, lg_range=%u", lg_range);
+ }
+}
+
+TEST_BEGIN(test_prng_lg_range_u32_nonatomic) {
+ test_prng_lg_range_u32(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_u32_atomic) {
+ test_prng_lg_range_u32(true);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_u64_nonatomic) {
+ test_prng_lg_range_u64();
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_zu_nonatomic) {
+ test_prng_lg_range_zu(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_lg_range_zu_atomic) {
+ test_prng_lg_range_zu(true);
+}
+TEST_END
+
+static void
+test_prng_range_u32(bool atomic) {
+ uint32_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ atomic_u32_t s;
+ unsigned rep;
+
+ atomic_store_u32(&s, range, ATOMIC_RELAXED);
+ for (rep = 0; rep < NREPS; rep++) {
+ uint32_t r = prng_range_u32(&s, range, atomic);
+
+ assert_u32_lt(r, range, "Out of range");
+ }
+ }
+}
+
+static void
+test_prng_range_u64(void) {
+ uint64_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ uint64_t s;
+ unsigned rep;
+
+ s = range;
+ for (rep = 0; rep < NREPS; rep++) {
+ uint64_t r = prng_range_u64(&s, range);
+
+ assert_u64_lt(r, range, "Out of range");
+ }
+ }
+}
+
+static void
+test_prng_range_zu(bool atomic) {
+ size_t range;
+#define MAX_RANGE 10000000
+#define RANGE_STEP 97
+#define NREPS 10
+
+ for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ atomic_zu_t s;
+ unsigned rep;
+
+ atomic_store_zu(&s, range, ATOMIC_RELAXED);
+ for (rep = 0; rep < NREPS; rep++) {
+ size_t r = prng_range_zu(&s, range, atomic);
+
+ assert_zu_lt(r, range, "Out of range");
+ }
+ }
+}
+
+TEST_BEGIN(test_prng_range_u32_nonatomic) {
+ test_prng_range_u32(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u32_atomic) {
+ test_prng_range_u32(true);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_u64_nonatomic) {
+ test_prng_range_u64();
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_zu_nonatomic) {
+ test_prng_range_zu(false);
+}
+TEST_END
+
+TEST_BEGIN(test_prng_range_zu_atomic) {
+ test_prng_range_zu(true);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prng_lg_range_u32_nonatomic,
+ test_prng_lg_range_u32_atomic,
+ test_prng_lg_range_u64_nonatomic,
+ test_prng_lg_range_zu_nonatomic,
+ test_prng_lg_range_zu_atomic,
+ test_prng_range_u32_nonatomic,
+ test_prng_range_u32_atomic,
+ test_prng_range_u64_nonatomic,
+ test_prng_range_zu_nonatomic,
+ test_prng_range_zu_atomic);
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
new file mode 100644
index 0000000..2522006
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -0,0 +1,81 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+static void *
+alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
+ return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
+}
+
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ size_t bt_count_prev, bt_count;
+ unsigned i_prev, i;
+
+ i_prev = 0;
+ bt_count_prev = 0;
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ void *p = alloc_from_permuted_backtrace(thd_ind, i);
+ dallocx(p, 0);
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
+ i+1 == NALLOCS_PER_THREAD) {
+ bt_count = prof_bt_count();
+ assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
+ "Expected larger backtrace count increase");
+ i_prev = i;
+ bt_count_prev = bt_count;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_idump) {
+ bool active;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_idump);
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.sh b/deps/jemalloc/test/unit/prof_accum.sh
new file mode 100644
index 0000000..b3e13fc
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c
new file mode 100644
index 0000000..850a24a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_active.c
@@ -0,0 +1,117 @@
+#include "test/jemalloc_test.h"
+
+static void
+mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
+ assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
+ name);
+}
+
+static void
+mallctl_bool_set(const char *name, bool old_expected, bool val_new,
+ const char *func, int line) {
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
+ sizeof(val_new)), 0,
+ "%s():%d: Unexpected mallctl failure reading/writing %s", func,
+ line, name);
+ assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
+ line, name);
+}
+
+static void
+mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
+ int line) {
+ mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
+}
+#define mallctl_prof_active_get(a) \
+ mallctl_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_prof_active_set_impl(bool prof_active_old_expected,
+ bool prof_active_new, const char *func, int line) {
+ mallctl_bool_set("prof.active", prof_active_old_expected,
+ prof_active_new, func, line);
+}
+#define mallctl_prof_active_set(a, b) \
+ mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
+ const char *func, int line) {
+ mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
+ func, line);
+}
+#define mallctl_thread_prof_active_get(a) \
+ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
+ bool thread_prof_active_new, const char *func, int line) {
+ mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
+ thread_prof_active_new, func, line);
+}
+#define mallctl_thread_prof_active_set(a, b) \
+ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
+ void *p;
+ size_t expected_backtraces = expect_sample ? 1 : 0;
+
+ assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
+ line);
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), expected_backtraces,
+ "%s():%d: Unexpected backtrace count", func, line);
+ dallocx(p, 0);
+}
+#define prof_sampling_probe(a) \
+ prof_sampling_probe_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_active) {
+ test_skip_if(!config_prof);
+
+ mallctl_prof_active_get(true);
+ mallctl_thread_prof_active_get(false);
+
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(false, false);
+ /* prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(true, false);
+ mallctl_thread_prof_active_set(false, false);
+ /* !prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, false);
+ mallctl_thread_prof_active_set(false, true);
+ /* !prof.active, thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, true);
+ mallctl_thread_prof_active_set(true, true);
+ /* prof.active, thread.prof.active. */
+ prof_sampling_probe(true);
+
+ /* Restore settings. */
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(true, false);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_prof_active);
+}
diff --git a/deps/jemalloc/test/unit/prof_active.sh b/deps/jemalloc/test/unit/prof_active.sh
new file mode 100644
index 0000000..0167cb1
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_active.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
new file mode 100644
index 0000000..f7e0aac
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -0,0 +1,74 @@
+#include "test/jemalloc_test.h"
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+TEST_BEGIN(test_gdump) {
+ bool active, gdump, gdump_old;
+ void *p, *q, *r, *s;
+ size_t sz;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ did_prof_dump_open = false;
+ q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ gdump = false;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ (void *)&gdump, sizeof(gdump)), 0,
+ "Unexpected mallctl failure while disabling prof.gdump");
+ assert(gdump_old);
+ did_prof_dump_open = false;
+ r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ assert_ptr_not_null(r, "Unexpected mallocx() failure");
+ assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+ gdump = true;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ (void *)&gdump, sizeof(gdump)), 0,
+ "Unexpected mallctl failure while enabling prof.gdump");
+ assert(!gdump_old);
+ did_prof_dump_open = false;
+ s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
+ assert_ptr_not_null(s, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+ dallocx(q, 0);
+ dallocx(r, 0);
+ dallocx(s, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_gdump);
+}
diff --git a/deps/jemalloc/test/unit/prof_gdump.sh b/deps/jemalloc/test/unit/prof_gdump.sh
new file mode 100644
index 0000000..3f600d2
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_gdump.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c
new file mode 100644
index 0000000..1cc6c98
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_idump.c
@@ -0,0 +1,42 @@
+#include "test/jemalloc_test.h"
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+TEST_BEGIN(test_idump) {
+ bool active;
+ void *p;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0,
+ "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_idump);
+}
diff --git a/deps/jemalloc/test/unit/prof_idump.sh b/deps/jemalloc/test/unit/prof_idump.sh
new file mode 100644
index 0000000..4dc599a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_idump.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache:false"
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
+fi
+
+
diff --git a/deps/jemalloc/test/unit/prof_log.c b/deps/jemalloc/test/unit/prof_log.c
new file mode 100644
index 0000000..92fbd7c
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_log.c
@@ -0,0 +1,148 @@
+#include "test/jemalloc_test.h"
+
+#define N_PARAM 100
+#define N_THREADS 10
+
+static void assert_rep(void) {
+ assert_b_eq(prof_log_rep_check(), false, "Rep check failed");
+}
+
+static void assert_log_empty(void) {
+ assert_zu_eq(prof_log_bt_count(), 0,
+ "The log has backtraces; it isn't empty");
+ assert_zu_eq(prof_log_thr_count(), 0,
+ "The log has threads; it isn't empty");
+ assert_zu_eq(prof_log_alloc_count(), 0,
+ "The log has allocations; it isn't empty");
+}
+
+void *buf[N_PARAM];
+
+static void f(void) {
+ int i;
+ for (i = 0; i < N_PARAM; i++) {
+ buf[i] = malloc(100);
+ }
+ for (i = 0; i < N_PARAM; i++) {
+ free(buf[i]);
+ }
+}
+
+TEST_BEGIN(test_prof_log_many_logs) {
+ int i;
+
+ test_skip_if(!config_prof);
+
+ for (i = 0; i < N_PARAM; i++) {
+ assert_b_eq(prof_log_is_logging(), false,
+ "Logging shouldn't have started yet");
+ assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ assert_b_eq(prof_log_is_logging(), true,
+ "Logging should be started by now");
+ assert_log_empty();
+ assert_rep();
+ f();
+ assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
+ assert_rep();
+ assert_b_eq(prof_log_is_logging(), true,
+ "Logging should still be on");
+ assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+ assert_b_eq(prof_log_is_logging(), false,
+ "Logging should have turned off");
+ }
+}
+TEST_END
+
+thd_t thr_buf[N_THREADS];
+
+static void *f_thread(void *unused) {
+ int i;
+ for (i = 0; i < N_PARAM; i++) {
+ void *p = malloc(100);
+ memset(p, 100, sizeof(char));
+ free(p);
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_log_many_threads) {
+
+ test_skip_if(!config_prof);
+
+ int i;
+ assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ for (i = 0; i < N_THREADS; i++) {
+ thd_create(&thr_buf[i], &f_thread, NULL);
+ }
+
+ for (i = 0; i < N_THREADS; i++) {
+ thd_join(thr_buf[i], NULL);
+ }
+ assert_zu_eq(prof_log_thr_count(), N_THREADS,
+ "Wrong number of thread entries");
+ assert_rep();
+ assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+}
+TEST_END
+
+static void f3(void) {
+ void *p = malloc(100);
+ free(p);
+}
+
+static void f1(void) {
+ void *p = malloc(100);
+ f3();
+ free(p);
+}
+
+static void f2(void) {
+ void *p = malloc(100);
+ free(p);
+}
+
+TEST_BEGIN(test_prof_log_many_traces) {
+
+ test_skip_if(!config_prof);
+
+ assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when starting logging");
+ int i;
+ assert_rep();
+ assert_log_empty();
+ for (i = 0; i < N_PARAM; i++) {
+ assert_rep();
+ f1();
+ assert_rep();
+ f2();
+ assert_rep();
+ f3();
+ assert_rep();
+ }
+ /*
+ * There should be 8 total backtraces: two for malloc/free in f1(), two
+ * for malloc/free in f2(), two for malloc/free in f3(), and then two
+ * for malloc/free in f1()'s call to f3(). However compiler
+ * optimizations such as loop unrolling might generate more call sites.
+ * So >= 8 traces are expected.
+ */
+ assert_zu_ge(prof_log_bt_count(), 8,
+ "Expect at least 8 backtraces given sample workload");
+ assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure when stopping logging");
+}
+TEST_END
+
+int
+main(void) {
+ prof_log_dummy_set(true);
+ return test_no_reentrancy(
+ test_prof_log_many_logs,
+ test_prof_log_many_traces,
+ test_prof_log_many_threads);
+}
diff --git a/deps/jemalloc/test/unit/prof_log.sh b/deps/jemalloc/test/unit/prof_log.sh
new file mode 100644
index 0000000..8fcc7d8
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_log.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c
new file mode 100644
index 0000000..7cce42d
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_reset.c
@@ -0,0 +1,286 @@
+#include "test/jemalloc_test.h"
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+static void
+set_prof_active(bool active) {
+ assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ sizeof(active)), 0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void) {
+ size_t lg_prof_sample;
+ size_t sz = sizeof(size_t);
+
+ assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
+ NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ return lg_prof_sample;
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL,
+ (void *)&lg_prof_sample, sizeof(size_t)), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+ "Expected profile sample rate change");
+}
+
+TEST_BEGIN(test_prof_reset_basic) {
+ size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+ size_t sz;
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
+ &sz, NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ assert_zu_eq(lg_prof_sample_orig, 0,
+ "Unexpected profiling sample rate");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+
+ /* Test simple resets. */
+ for (i = 0; i < 2; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected profile sample rate change");
+ }
+
+ /* Test resets with prof.lg_sample changes. */
+ lg_prof_sample_next = 1;
+ for (i = 0; i < 2; i++) {
+ do_prof_reset(lg_prof_sample_next);
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
+ "Expected profile sample rate change");
+ lg_prof_sample_next = lg_prof_sample_orig;
+ }
+
+ /* Make sure the test code restored prof.lg_sample. */
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+}
+TEST_END
+
+bool prof_dump_header_intercepted = false;
+prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
+static bool
+prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
+ const prof_cnt_t *cnt_all) {
+ prof_dump_header_intercepted = true;
+ memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
+
+ return false;
+}
+
+TEST_BEGIN(test_prof_reset_cleanup) {
+ void *p;
+ prof_dump_header_t *prof_dump_header_orig;
+
+ test_skip_if(!config_prof);
+
+ set_prof_active(true);
+
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header_orig = prof_dump_header;
+ prof_dump_header = prof_dump_header_intercept;
+ assert_false(prof_dump_header_intercepted, "Unexpected intercept");
+
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_true(prof_dump_header_intercepted, "Expected intercept");
+ assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
+
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header = prof_dump_header_orig;
+
+ dallocx(p, 0);
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+
+ set_prof_active(false);
+}
+TEST_END
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD (1U << 13)
+#define OBJ_RING_BUF_COUNT 1531
+#define RESET_INTERVAL (1U << 10)
+#define DUMP_INTERVAL 3677
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ unsigned i;
+ void *objs[OBJ_RING_BUF_COUNT];
+
+ memset(objs, 0, sizeof(objs));
+
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ if (i % RESET_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while resetting heap profile "
+ "data");
+ }
+
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
+ assert_ptr_not_null(*pp,
+ "Unexpected btalloc() failure");
+ }
+ }
+
+ /* Clean up any remaining objects. */
+ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_reset) {
+ size_t lg_prof_sample_orig;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+ size_t bt_count, tdata_count;
+
+ test_skip_if(!config_prof);
+
+ bt_count = prof_bt_count();
+ assert_zu_eq(bt_count, 0,
+ "Unexpected pre-existing backtraces");
+ tdata_count = prof_tdata_count();
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ do_prof_reset(5);
+
+ set_prof_active(true);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+
+ assert_zu_eq(prof_bt_count(), bt_count,
+ "Unexpected backtrace count change");
+ assert_zu_eq(prof_tdata_count(), tdata_count,
+ "Unexpected remaining tdata structures");
+
+ set_prof_active(false);
+
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx) {
+ size_t lg_prof_sample_orig;
+ unsigned i;
+ void *ptrs[NITER];
+
+ test_skip_if(!config_prof);
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ set_prof_active(true);
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ for (i = 0; i < NITER; i++) {
+ void *p;
+ size_t sz, nsz;
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Allocate small object (which will be promoted). */
+ p = ptrs[i] = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Perform successful xallocx(). */
+ sz = sallocx(p, 0);
+ assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+ "Unexpected xallocx() failure");
+
+ /* Perform unsuccessful xallocx(). */
+ nsz = nallocx(sz+1, 0);
+ assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ "Unexpected xallocx() success");
+ }
+
+ for (i = 0; i < NITER; i++) {
+ /* dallocx. */
+ dallocx(ptrs[i], 0);
+ }
+
+ set_prof_active(false);
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
+int
+main(void) {
+ /* Intercept dumping prior to running any tests. */
+ prof_dump_open = prof_dump_open_intercept;
+
+ return test_no_reentrancy(
+ test_prof_reset_basic,
+ test_prof_reset_cleanup,
+ test_prof_reset,
+ test_xallocx);
+}
diff --git a/deps/jemalloc/test/unit/prof_reset.sh b/deps/jemalloc/test/unit/prof_reset.sh
new file mode 100644
index 0000000..43c516a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_reset.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_tctx.c b/deps/jemalloc/test/unit/prof_tctx.c
new file mode 100644
index 0000000..ff3b2b0
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_tctx.c
@@ -0,0 +1,46 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_prof_realloc) {
+ tsdn_t *tsdn;
+ int flags;
+ void *p, *q;
+ prof_tctx_t *tctx_p, *tctx_q;
+ uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3;
+
+ test_skip_if(!config_prof);
+
+ tsdn = tsdn_fetch();
+ flags = MALLOCX_TCACHE_NONE;
+
+ prof_cnt_all(&curobjs_0, NULL, NULL, NULL);
+ p = mallocx(1024, flags);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ tctx_p = prof_tctx_get(tsdn, p, NULL);
+ assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U,
+ "Expected valid tctx");
+ prof_cnt_all(&curobjs_1, NULL, NULL, NULL);
+ assert_u64_eq(curobjs_0 + 1, curobjs_1,
+ "Allocation should have increased sample size");
+
+ q = rallocx(p, 2048, flags);
+ assert_ptr_ne(p, q, "Expected move");
+ assert_ptr_not_null(q, "Unexpected rallocx() failure");
+ tctx_q = prof_tctx_get(tsdn, q, NULL);
+ assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U,
+ "Expected valid tctx");
+ prof_cnt_all(&curobjs_2, NULL, NULL, NULL);
+ assert_u64_eq(curobjs_1, curobjs_2,
+ "Reallocation should not have changed sample size");
+
+ dallocx(q, flags);
+ prof_cnt_all(&curobjs_3, NULL, NULL, NULL);
+ assert_u64_eq(curobjs_0, curobjs_3,
+ "Sample size should have returned to base level");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_prof_realloc);
+}
diff --git a/deps/jemalloc/test/unit/prof_tctx.sh b/deps/jemalloc/test/unit/prof_tctx.sh
new file mode 100644
index 0000000..8fcc7d8
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_tctx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c
new file mode 100644
index 0000000..c9c2a2b
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_thread_name.c
@@ -0,0 +1,120 @@
+#include "test/jemalloc_test.h"
+
+static void
+mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
+ int line) {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
+ NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ assert_str_eq(thread_name_old, thread_name_expected,
+ "%s():%d: Unexpected thread.prof.name value", func, line);
+}
+#define mallctl_thread_name_get(a) \
+ mallctl_thread_name_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_name_set_impl(const char *thread_name, const char *func,
+ int line) {
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), 0,
+ "%s():%d: Unexpected mallctl failure writing thread.prof.name",
+ func, line);
+ mallctl_thread_name_get_impl(thread_name, func, line);
+}
+#define mallctl_thread_name_set(a) \
+ mallctl_thread_name_set_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_thread_name_validation) {
+ const char *thread_name;
+
+ test_skip_if(!config_prof);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set("hi there");
+
+ /* NULL input shouldn't be allowed. */
+ thread_name = NULL;
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* '\n' shouldn't be allowed. */
+ thread_name = "hi\nthere";
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* Simultaneous read/write shouldn't be allowed. */
+ {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name",
+ (void *)&thread_name_old, &sz, (void *)&thread_name,
+ sizeof(thread_name)), EPERM,
+ "Unexpected mallctl result writing \"%s\" to "
+ "thread.prof.name", thread_name);
+ }
+
+ mallctl_thread_name_set("");
+}
+TEST_END
+
+#define NTHREADS 4
+#define NRESET 25
+static void *
+thd_start(void *varg) {
+ unsigned thd_ind = *(unsigned *)varg;
+ char thread_name[16] = "";
+ unsigned i;
+
+ malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set(thread_name);
+
+ for (i = 0; i < NRESET; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ mallctl_thread_name_get(thread_name);
+ }
+
+ mallctl_thread_name_set(thread_name);
+ mallctl_thread_name_set("");
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_thread_name_threaded) {
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+#undef NTHREADS
+#undef NRESET
+
+int
+main(void) {
+ return test(
+ test_prof_thread_name_validation,
+ test_prof_thread_name_threaded);
+}
diff --git a/deps/jemalloc/test/unit/prof_thread_name.sh b/deps/jemalloc/test/unit/prof_thread_name.sh
new file mode 100644
index 0000000..298c105
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_thread_name.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:false"
+fi
diff --git a/deps/jemalloc/test/unit/ql.c b/deps/jemalloc/test/unit/ql.c
new file mode 100644
index 0000000..b76c24c
--- /dev/null
+++ b/deps/jemalloc/test/unit/ql.c
@@ -0,0 +1,204 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ql.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+
+typedef struct list_s list_t;
+typedef ql_head(list_t) list_head_t;
+
+struct list_s {
+ ql_elm(list_t) link;
+ char id;
+};
+
+static void
+test_empty_list(list_head_t *head) {
+ list_t *t;
+ unsigned i;
+
+ assert_ptr_null(ql_first(head), "Unexpected element for empty list");
+ assert_ptr_null(ql_last(head, link),
+ "Unexpected element for empty list");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+}
+
+TEST_BEGIN(test_ql_empty) {
+ list_head_t head;
+
+ ql_new(&head);
+ test_empty_list(&head);
+}
+TEST_END
+
+static void
+init_entries(list_t *entries, unsigned nentries) {
+ unsigned i;
+
+ for (i = 0; i < nentries; i++) {
+ entries[i].id = 'a' + i;
+ ql_elm_new(&entries[i], link);
+ }
+}
+
+static void
+test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
+ list_t *t;
+ unsigned i;
+
+ assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
+ assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
+ "Element id mismatch");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[i].id, "Element id mismatch");
+ i++;
+ }
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[nentries-i-1].id,
+ "Element id mismatch");
+ i++;
+ }
+
+ for (i = 0; i < nentries-1; i++) {
+ t = ql_next(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
+ }
+ assert_ptr_null(ql_next(head, &entries[nentries-1], link),
+ "Unexpected element");
+
+ assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
+ for (i = 1; i < nentries; i++) {
+ t = ql_prev(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_ql_tail_insert) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_tail_remove) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, entries, NENTRIES-i);
+ ql_tail_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_insert) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_remove) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, &entries[i], NENTRIES-i);
+ ql_head_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_insert) {
+ list_head_t head;
+ list_t entries[8];
+ list_t *a, *b, *c, *d, *e, *f, *g, *h;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ a = &entries[0];
+ b = &entries[1];
+ c = &entries[2];
+ d = &entries[3];
+ e = &entries[4];
+ f = &entries[5];
+ g = &entries[6];
+ h = &entries[7];
+
+ /*
+ * ql_remove(), ql_before_insert(), and ql_after_insert() are used
+ * internally by other macros that are already tested, so there's no
+ * need to test them completely. However, insertion/deletion from the
+ * middle of lists is not otherwise tested; do so here.
+ */
+ ql_tail_insert(&head, f, link);
+ ql_before_insert(&head, f, b, link);
+ ql_before_insert(&head, f, c, link);
+ ql_after_insert(f, h, link);
+ ql_after_insert(f, g, link);
+ ql_before_insert(&head, b, a, link);
+ ql_after_insert(c, d, link);
+ ql_before_insert(&head, f, e, link);
+
+ test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_ql_empty,
+ test_ql_tail_insert,
+ test_ql_tail_remove,
+ test_ql_head_insert,
+ test_ql_head_remove,
+ test_ql_insert);
+}
diff --git a/deps/jemalloc/test/unit/qr.c b/deps/jemalloc/test/unit/qr.c
new file mode 100644
index 0000000..271a109
--- /dev/null
+++ b/deps/jemalloc/test/unit/qr.c
@@ -0,0 +1,243 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/qr.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+/* Split index, in [1..NENTRIES). */
+#define SPLIT_INDEX 5
+
+typedef struct ring_s ring_t;
+
+struct ring_s {
+ qr(ring_t) link;
+ char id;
+};
+
+static void
+init_entries(ring_t *entries) {
+ unsigned i;
+
+ for (i = 0; i < NENTRIES; i++) {
+ qr_new(&entries[i], link);
+ entries[i].id = 'a' + i;
+ }
+}
+
+static void
+test_independent_entries(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Next element in single-element ring should be same as "
+ "current element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Previous element in single-element ring should be same as "
+ "current element");
+ }
+}
+
+TEST_BEGIN(test_qr_one) {
+ ring_t entries[NENTRIES];
+
+ init_entries(entries);
+ test_independent_entries(entries);
+}
+TEST_END
+
+static void
+test_entries_ring(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_qr_after_insert) {
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+ test_entries_ring(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_remove) {
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[i+j].id,
+ "Element id mismatch");
+ j++;
+ }
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
+ "Element id mismatch");
+ j++;
+ }
+ qr_remove(&entries[i], link);
+ }
+ test_independent_entries(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_before_insert) {
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_before_insert(&entries[i - 1], &entries[i], link);
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+TEST_END
+
+static void
+test_split_entries(ring_t *entries) {
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ if (i < SPLIT_INDEX) {
+ assert_c_eq(t->id,
+ entries[(i+j) % SPLIT_INDEX].id,
+ "Element id mismatch");
+ } else {
+ assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
+ (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
+ "Element id mismatch");
+ }
+ j++;
+ }
+ }
+}
+
+TEST_BEGIN(test_qr_meld_split) {
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++) {
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ test_split_entries(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ test_split_entries(entries);
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ test_entries_ring(entries);
+
+ qr_split(&entries[0], &entries[0], ring_t, link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[0], ring_t, link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_qr_one,
+ test_qr_after_insert,
+ test_qr_remove,
+ test_qr_before_insert,
+ test_qr_meld_split);
+}
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
new file mode 100644
index 0000000..65c0492
--- /dev/null
+++ b/deps/jemalloc/test/unit/rb.c
@@ -0,0 +1,355 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/rb.h"
+
+/*
+ * Compute the black height of (a_rbt) by walking the leftmost path from the
+ * root and counting non-red nodes; the result is stored into (r_height).
+ */
+#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \
+ NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \
+ rbp_bh_t)) { \
+ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+typedef struct node_s node_t;
+
+/* Test tree node: magic-tagged payload with an intrusive red-black link. */
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic; /* NODE_MAGIC while live; zeroed on removal. */
+ rb_node(node_t) link;
+ uint64_t key;
+};
+
+/*
+ * Total order for nodes: compare by key, then break ties by node address so
+ * that distinct nodes never compare equal. Also validates both nodes' magic.
+ */
+static int
+node_cmp(const node_t *a, const node_t *b) {
+ int ret;
+
+ assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the tree, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return ret;
+}
+
+typedef rb_tree(node_t) tree_t;
+rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
+
+/* First/last and all three search flavors on an empty tree return nothing. */
+TEST_BEGIN(test_rb_empty) {
+ tree_t tree;
+ node_t key;
+
+ tree_new(&tree);
+
+ assert_true(tree_empty(&tree), "Tree should be empty");
+ assert_ptr_null(tree_first(&tree), "Unexpected node");
+ assert_ptr_null(tree_last(&tree), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+}
+TEST_END
+
+/*
+ * Recursively validate the subtree rooted at node: node magic, the red-black
+ * interleaving invariant (a red node has no red child), and that every NULL
+ * leaf lies at exactly black_height black nodes from the root. black_depth
+ * is the number of black nodes on the path so far, including node itself.
+ * Returns the number of violations found (0 for a well-formed subtree).
+ */
+static unsigned
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
+ unsigned ret = 0;
+ node_t *left_node;
+ node_t *right_node;
+
+ if (node == NULL) {
+ return ret;
+ }
+
+ left_node = rbtn_left_get(node_t, link, node);
+ right_node = rbtn_right_get(node_t, link, node);
+
+ if (!rbtn_red_get(node_t, link, node)) {
+ black_depth++;
+ }
+
+ /* Red nodes must be interleaved with black nodes. */
+ if (rbtn_red_get(node_t, link, node)) {
+ if (left_node != NULL) {
+ assert_false(rbtn_red_get(node_t, link, left_node),
+ "Node should be black");
+ }
+ if (right_node != NULL) {
+ assert_false(rbtn_red_get(node_t, link, right_node),
+ "Node should be black");
+ }
+ }
+
+ /* Self. */
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Left subtree. */
+ if (left_node != NULL) {
+ ret += tree_recurse(left_node, black_height, black_depth);
+ } else {
+ ret += (black_depth != black_height);
+ }
+
+ /* Right subtree. */
+ if (right_node != NULL) {
+ ret += tree_recurse(right_node, black_height, black_depth);
+ } else {
+ ret += (black_depth != black_height);
+ }
+
+ return ret;
+}
+
+/*
+ * Iteration callback: check the visited node's magic, verify that all three
+ * search flavors locate exactly this node, and bump the visit counter passed
+ * via data. Returns NULL so that iteration proceeds to the end.
+ */
+static node_t *
+tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *i = (unsigned *)data;
+ node_t *search_node;
+
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Test rb_search(). */
+ search_node = tree_search(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_search() returned unexpected node");
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_nsearch() returned unexpected node");
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_psearch() returned unexpected node");
+
+ (*i)++;
+
+ return NULL;
+}
+
+/* Walk the whole tree forward; return the number of nodes visited. */
+static unsigned
+tree_iterate(tree_t *tree) {
+ unsigned nvisited = 0;
+
+ tree_iter(tree, NULL, tree_iterate_cb, (void *)&nvisited);
+ return nvisited;
+}
+
+/* Walk the whole tree backward; return the number of nodes visited. */
+static unsigned
+tree_iterate_reverse(tree_t *tree) {
+ unsigned nvisited = 0;
+
+ tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&nvisited);
+ return nvisited;
+}
+
+/*
+ * Remove node from a tree currently holding nnodes nodes, then validate the
+ * remaining tree: nsearch/psearch ordering around the removed key, red-black
+ * balance, and that both iteration directions now visit nnodes-1 nodes.
+ */
+static void
+node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
+ node_t *search_node;
+ unsigned black_height, imbalances;
+
+ tree_remove(tree, node);
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_ge(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_le(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ /* Invalidate the magic so stale accesses trip the magic asserts. */
+ node->magic = 0;
+
+ rbtn_black_height(node_t, link, tree, black_height);
+ imbalances = tree_recurse(tree->rbt_root, black_height, 0);
+ assert_u_eq(imbalances, 0, "Tree is unbalanced");
+ assert_u_eq(tree_iterate(tree), nnodes-1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
+ "Unexpected node iteration count");
+}
+
+/*
+ * tree_iter() callback: remove the visited node and resume iteration at its
+ * successor (captured before the removal).
+ */
+static node_t *
+remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_next(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return ret;
+}
+
+/*
+ * tree_reverse_iter() callback: remove the visited node and resume at its
+ * predecessor (captured before the removal).
+ */
+static node_t *
+remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_prev(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return ret;
+}
+
+/* tree_destroy() callback: count down the expected remaining nodes. */
+static void
+destroy_cb(node_t *node, void *data) {
+ unsigned *nnodes = (unsigned *)data;
+
+ assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+ (*nnodes)--;
+}
+
+/*
+ * Randomized stress test: NBAGS bags of NNODES keys (in order, reverse
+ * order, then random with possible duplicates). For each bag and each
+ * prefix length j, insert j nodes with full validation after every insert,
+ * then remove them via one of five strategies selected by i % 5.
+ */
+TEST_BEGIN(test_rb_random) {
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ tree_t tree;
+ node_t nodes[NNODES];
+ unsigned i, j, k, black_height, imbalances;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = j;
+ }
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = NNODES - j - 1;
+ }
+ break;
+ default:
+ for (j = 0; j < NNODES; j++) {
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize tree and nodes. */
+ tree_new(&tree);
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ tree_insert(&tree, &nodes[k]);
+
+ rbtn_black_height(node_t, link, &tree,
+ black_height);
+ imbalances = tree_recurse(tree.rbt_root,
+ black_height, 0);
+ assert_u_eq(imbalances, 0,
+ "Tree is unbalanced");
+
+ assert_u_eq(tree_iterate(&tree), k+1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(&tree), k+1,
+ "Unexpected node iteration count");
+
+ assert_false(tree_empty(&tree),
+ "Tree should not be empty");
+ assert_ptr_not_null(tree_first(&tree),
+ "Tree should not be empty");
+ assert_ptr_not_null(tree_last(&tree),
+ "Tree should not be empty");
+
+ /* Smoke-test traversal; results unused. */
+ tree_next(&tree, &nodes[k]);
+ tree_prev(&tree, &nodes[k]);
+ }
+
+ /* Remove nodes. */
+ switch (i % 5) {
+ case 0:
+ /* Remove in insertion order. */
+ for (k = 0; k < j; k++) {
+ node_remove(&tree, &nodes[k], j - k);
+ }
+ break;
+ case 1:
+ /* Remove in reverse insertion order. */
+ for (k = j; k > 0; k--) {
+ node_remove(&tree, &nodes[k-1], k);
+ }
+ break;
+ case 2: {
+ /* Remove during forward iteration. */
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_iter(&tree, start,
+ remove_iterate_cb, (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 3: {
+ /* Remove during reverse iteration. */
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_reverse_iter(&tree, start,
+ remove_reverse_iterate_cb,
+ (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 4: {
+ /* Remove via bulk destruction. */
+ unsigned nnodes = j;
+ tree_destroy(&tree, destroy_cb, &nnodes);
+ assert_u_eq(nnodes, 0,
+ "Destruction terminated early");
+ break;
+ } default:
+ not_reached();
+ }
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef NBAGS
+#undef SEED
+}
+TEST_END
+
+/* Run the red-black tree unit tests. */
+int
+main(void) {
+ return test(
+ test_rb_empty,
+ test_rb_random);
+}
diff --git a/deps/jemalloc/test/unit/retained.c b/deps/jemalloc/test/unit/retained.c
new file mode 100644
index 0000000..7993fd3
--- /dev/null
+++ b/deps/jemalloc/test/unit/retained.c
@@ -0,0 +1,184 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/spin.h"
+
+static unsigned arena_ind;
+static size_t sz;
+static size_t esz;
+#define NEPOCHS 8
+#define PER_THD_NALLOCS 1
+static atomic_u_t epoch;
+static atomic_u_t nfinished;
+
+/*
+ * Create a new arena via the "arenas.create" mallctl, optionally installing
+ * extent hooks h, and return its index. Note that the locals deliberately
+ * shadow the file-scope arena_ind/sz.
+ */
+static unsigned
+do_arena_create(extent_hooks_t *h) {
+ unsigned arena_ind;
+ size_t sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
+ "Unexpected mallctl() failure");
+ return arena_ind;
+}
+
+/* Destroy arena arena_ind via the "arena.<i>.destroy" mallctl. */
+static void
+do_arena_destroy(unsigned arena_ind) {
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = (size_t)arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+
+/* Advance the stats epoch so subsequent stats reads are up to date. */
+static void
+do_refresh(void) {
+ uint64_t epoch = 1;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ sizeof(epoch)), 0, "Unexpected mallctl() failure");
+}
+
+/*
+ * Read the size_t statistic named by cmd for arena_ind (the arena index
+ * occupies mib position 2, e.g. "stats.arenas.<i>.<stat>").
+ */
+static size_t
+do_get_size_impl(const char *cmd, unsigned arena_ind) {
+ size_t mib[4];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ size_t z = sizeof(size_t);
+
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = arena_ind;
+ size_t size;
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
+
+ return size;
+}
+
+/* Active memory in bytes (pactive is in pages, hence the * PAGE). */
+static size_t
+do_get_active(unsigned arena_ind) {
+ return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
+}
+
+/* Mapped memory in bytes for arena_ind. */
+static size_t
+do_get_mapped(unsigned arena_ind) {
+ return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
+}
+
+/*
+ * Worker thread: for each epoch published by the main thread, perform
+ * PER_THD_NALLOCS uncached allocations of sz bytes from the test arena and
+ * signal completion through nfinished.
+ */
+static void *
+thd_start(void *arg) {
+ for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
+ /* Busy-wait for next epoch. */
+ unsigned cur_epoch;
+ spin_t spinner = SPIN_INITIALIZER;
+ while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
+ next_epoch) {
+ spin_adaptive(&spinner);
+ }
+ assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
+
+ /*
+ * Allocate. The main thread will reset the arena, so there's
+ * no need to deallocate.
+ */
+ for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
+ void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
+ MALLOCX_TCACHE_NONE
+ );
+ assert_ptr_not_null(p,
+ "Unexpected mallocx() failure\n");
+ }
+
+ /* Let the main thread know we've finished this iteration. */
+ atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
+ }
+
+ return NULL;
+}
+
+/*
+ * Drive worker threads through NEPOCHS rounds of huge allocations, and after
+ * each round verify that active <= mapped and that retained growth stays
+ * within the size classes the extent-growth policy could have used. The
+ * arena is destroyed and recreated between rounds to reset its state.
+ */
+TEST_BEGIN(test_retained) {
+ test_skip_if(!config_stats);
+
+ arena_ind = do_arena_create(NULL);
+ sz = nallocx(HUGEPAGE, 0);
+ esz = sz + sz_large_pad;
+
+ atomic_store_u(&epoch, 0, ATOMIC_RELAXED);
+
+ unsigned nthreads = ncpus * 2;
+ if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
+ nthreads = 16; /* 32-bit platform could run out of vaddr. */
+ }
+ VARIABLE_ARRAY(thd_t, threads, nthreads);
+ for (unsigned i = 0; i < nthreads; i++) {
+ thd_create(&threads[i], thd_start, NULL);
+ }
+
+ for (unsigned e = 1; e < NEPOCHS; e++) {
+ atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
+ atomic_store_u(&epoch, e, ATOMIC_RELEASE);
+
+ /* Wait for threads to finish allocating. */
+ spin_t spinner = SPIN_INITIALIZER;
+ while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
+ spin_adaptive(&spinner);
+ }
+
+ /*
+ * Assert that retained is no more than the sum of size classes
+ * that should have been used to satisfy the worker threads'
+ * requests, discounting per growth fragmentation.
+ */
+ do_refresh();
+
+ size_t allocated = esz * nthreads * PER_THD_NALLOCS;
+ size_t active = do_get_active(arena_ind);
+ assert_zu_le(allocated, active, "Unexpected active memory");
+ size_t mapped = do_get_mapped(arena_ind);
+ assert_zu_le(active, mapped, "Unexpected mapped memory");
+
+ arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
+ size_t usable = 0;
+ size_t fragmented = 0;
+ for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
+ arena->extent_grow_next; pind++) {
+ size_t psz = sz_pind2sz(pind);
+ size_t psz_fragmented = psz % esz;
+ size_t psz_usable = psz - psz_fragmented;
+ /*
+ * Only consider size classes that wouldn't be skipped.
+ */
+ if (psz_usable > 0) {
+ assert_zu_lt(usable, allocated,
+ "Excessive retained memory "
+ "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
+ allocated);
+ fragmented += psz_fragmented;
+ usable += psz_usable;
+ }
+ }
+
+ /*
+ * Clean up arena. Destroying and recreating the arena
+ * is simpler that specifying extent hooks that deallocate
+ * (rather than retaining) during reset.
+ */
+ do_arena_destroy(arena_ind);
+ assert_u_eq(do_arena_create(NULL), arena_ind,
+ "Unexpected arena index");
+ }
+
+ for (unsigned i = 0; i < nthreads; i++) {
+ thd_join(threads[i], NULL);
+ }
+
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+/* Run the retained-memory accounting test. */
+int
+main(void) {
+ return test(
+ test_retained);
+}
diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c
new file mode 100644
index 0000000..90adca1
--- /dev/null
+++ b/deps/jemalloc/test/unit/rtree.c
@@ -0,0 +1,228 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/rtree.h"
+
+rtree_node_alloc_t *rtree_node_alloc_orig;
+rtree_node_dalloc_t *rtree_node_dalloc_orig;
+rtree_leaf_alloc_t *rtree_leaf_alloc_orig;
+rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig;
+
+/* Potentially too large to safely place on the stack. */
+rtree_t test_rtree;
+
+/*
+ * Node-allocation override for the test rtree: use plain calloc() instead of
+ * the internal allocator. The caller holds init_lock, so it is dropped
+ * around the calloc() call and re-acquired afterwards. Other rtrees fall
+ * through to the original allocator.
+ */
+static rtree_node_elm_t *
+rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ rtree_node_elm_t *node;
+
+ if (rtree != &test_rtree) {
+ return rtree_node_alloc_orig(tsdn, rtree, nelms);
+ }
+
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t));
+ assert_ptr_not_null(node, "Unexpected calloc() failure");
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+
+ return node;
+}
+
+/* Matching deallocation override: free() nodes we calloc()ed above. */
+static void
+rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_node_elm_t *node) {
+ if (rtree != &test_rtree) {
+ rtree_node_dalloc_orig(tsdn, rtree, node);
+ return;
+ }
+
+ free(node);
+}
+
+/*
+ * Leaf-allocation override for the test rtree; same pattern as the node
+ * intercept: calloc() with init_lock temporarily released.
+ */
+static rtree_leaf_elm_t *
+rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ rtree_leaf_elm_t *leaf;
+
+ if (rtree != &test_rtree) {
+ return rtree_leaf_alloc_orig(tsdn, rtree, nelms);
+ }
+
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t));
+ assert_ptr_not_null(leaf, "Unexpected calloc() failure");
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+
+ return leaf;
+}
+
+/* Matching deallocation override: free() leaves we calloc()ed above. */
+static void
+rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *leaf) {
+ if (rtree != &test_rtree) {
+ rtree_leaf_dalloc_orig(tsdn, rtree, leaf);
+ return;
+ }
+
+ free(leaf);
+}
+
+/* A freshly created rtree must return NULL for any lookup. */
+TEST_BEGIN(test_rtree_read_empty) {
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+ assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
+ false), "rtree_extent_read() should return NULL for empty tree");
+ rtree_delete(tsdn, rtree);
+}
+TEST_END
+
+/*
+ * NOTE(review): leftovers from a removed multi-threaded test — none of these
+ * macros is defined in this file, so the #undefs are no-ops.
+ */
+#undef NTHREADS
+#undef NITERS
+#undef SEED
+
+/*
+ * Write and read back extents at the extreme keys: the lowest usable key
+ * (PAGE) and the highest possible key (~(uintptr_t)0).
+ */
+TEST_BEGIN(test_rtree_extrema) {
+ extent_t extent_a, extent_b;
+ extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false,
+ sz_size2index(SC_LARGE_MINCLASS), 0,
+ extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+ extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+
+ assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
+ extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
+ "Unexpected rtree_write() failure");
+ rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
+ extent_szind_get(&extent_a), extent_slab_get(&extent_a));
+ assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true),
+ &extent_a,
+ "rtree_extent_read() should return previously set value");
+
+ assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
+ &extent_b, extent_szind_get_maybe_invalid(&extent_b),
+ extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
+ assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ ~((uintptr_t)0), true), &extent_b,
+ "rtree_extent_read() should return previously set value");
+
+ rtree_delete(tsdn, rtree);
+}
+TEST_END
+
+/*
+ * Keys that differ only in their low (sub-page) bits must map to the same
+ * leaf element, while a key in the next page must remain unset.
+ */
+TEST_BEGIN(test_rtree_bits) {
+ tsdn_t *tsdn = tsdn_fetch();
+
+ uintptr_t keys[] = {PAGE, PAGE + 1,
+ PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
+
+ extent_t extent;
+ extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+ assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+
+ for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
+ assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
+ &extent, SC_NSIZES, false),
+ "Unexpected rtree_write() failure");
+ for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
+ assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ keys[j], true), &extent,
+ "rtree_extent_read() should return previously set "
+ "value and ignore insignificant key bits; i=%u, "
+ "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
+ j, keys[i], keys[j]);
+ }
+ assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ (((uintptr_t)2) << LG_PAGE), false),
+ "Only leftmost rtree leaf should be set; i=%u", i);
+ rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
+ }
+
+ rtree_delete(tsdn, rtree);
+}
+TEST_END
+
+/*
+ * Populate the rtree with NSET random keys via direct leaf-element writes,
+ * verify the mappings read back, then clear each key and verify the
+ * mappings are gone. Fixed: the two post-clear assertions previously
+ * reused the "should return previously set value" message, which is the
+ * opposite of what they check; they now report the NULL expectation.
+ */
+TEST_BEGIN(test_rtree_random) {
+#define NSET 16
+#define SEED 42
+ sfmt_t *sfmt = init_gen_rand(SEED);
+ tsdn_t *tsdn = tsdn_fetch();
+ uintptr_t keys[NSET];
+ rtree_t *rtree = &test_rtree;
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+
+ extent_t extent;
+ extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+
+ assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+
+ for (unsigned i = 0; i < NSET; i++) {
+ keys[i] = (uintptr_t)gen_rand64(sfmt);
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
+ &rtree_ctx, keys[i], false, true);
+ assert_ptr_not_null(elm,
+ "Unexpected rtree_leaf_elm_lookup() failure");
+ rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+ false);
+ assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true), &extent,
+ "rtree_extent_read() should return previously set value");
+ }
+ for (unsigned i = 0; i < NSET; i++) {
+ assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true), &extent,
+ "rtree_extent_read() should return previously set value, "
+ "i=%u", i);
+ }
+
+ for (unsigned i = 0; i < NSET; i++) {
+ rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
+ assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true),
+ "rtree_extent_read() should return NULL after clear");
+ }
+ /* Second pass: earlier clears must not have resurrected any key. */
+ for (unsigned i = 0; i < NSET; i++) {
+ assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true),
+ "rtree_extent_read() should return NULL for cleared key");
+ }
+
+ rtree_delete(tsdn, rtree);
+ fini_gen_rand(sfmt);
+#undef NSET
+#undef SEED
+}
+TEST_END
+
+/*
+ * Install the calloc()/free() intercepts before running tests, keeping the
+ * original allocators for any rtree other than test_rtree.
+ */
+int
+main(void) {
+ rtree_node_alloc_orig = rtree_node_alloc;
+ rtree_node_alloc = rtree_node_alloc_intercept;
+ rtree_node_dalloc_orig = rtree_node_dalloc;
+ rtree_node_dalloc = rtree_node_dalloc_intercept;
+ rtree_leaf_alloc_orig = rtree_leaf_alloc;
+ rtree_leaf_alloc = rtree_leaf_alloc_intercept;
+ rtree_leaf_dalloc_orig = rtree_leaf_dalloc;
+ rtree_leaf_dalloc = rtree_leaf_dalloc_intercept;
+
+ return test(
+ test_rtree_read_empty,
+ test_rtree_extrema,
+ test_rtree_bits,
+ test_rtree_random);
+}
diff --git a/deps/jemalloc/test/unit/safety_check.c b/deps/jemalloc/test/unit/safety_check.c
new file mode 100644
index 0000000..bf4bd86
--- /dev/null
+++ b/deps/jemalloc/test/unit/safety_check.c
@@ -0,0 +1,156 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+/*
+ * Note that we get called through safety_check.sh, which turns on sampling for
+ * everything.
+ */
+
+/* Abort hook: record that a safety-check failure fired instead of dying. */
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+/* Overflow a malloc()ed buffer by one byte; free() must trip the redzone. */
+TEST_BEGIN(test_malloc_free_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ ptr[128] = 0;
+ free(ptr);
+ safety_check_set_abort(NULL);
+
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+/* Same overflow via the mallocx()/dallocx() path. */
+TEST_BEGIN(test_mallocx_dallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = mallocx(128, 0);
+ ptr[128] = 0;
+ dallocx(ptr, 0);
+ safety_check_set_abort(NULL);
+
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+/* Same overflow, deallocated with sdallocx(). */
+TEST_BEGIN(test_malloc_sdallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ ptr[128] = 0;
+ sdallocx(ptr, 128, 0);
+ safety_check_set_abort(NULL);
+
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+/* The redzone check must also fire when the overflown buffer is realloc()ed. */
+TEST_BEGIN(test_realloc_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ ptr[128] = 0;
+ ptr = realloc(ptr, 129);
+ safety_check_set_abort(NULL);
+ free(ptr);
+
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+/* Same as test_realloc_overflow, via rallocx(). */
+TEST_BEGIN(test_rallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ ptr[128] = 0;
+ ptr = rallocx(ptr, 129, 0);
+ safety_check_set_abort(NULL);
+ free(ptr);
+
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+}
+TEST_END
+
+/* xallocx() on an overflown buffer: the check fires and the size is kept. */
+TEST_BEGIN(test_xallocx_overflow) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_opt_safety_checks);
+
+ safety_check_set_abort(&fake_abort);
+ /* Buffer overflow! */
+ char* ptr = malloc(128);
+ ptr[128] = 0;
+ size_t result = xallocx(ptr, 129, 0, 0);
+ assert_zu_eq(result, 128, "");
+ free(ptr);
+ assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ fake_abort_called = false;
+ safety_check_set_abort(NULL);
+}
+TEST_END
+
+/* In-bounds writes after growing/shrinking via realloc() trip no check. */
+TEST_BEGIN(test_realloc_no_overflow) {
+ char* ptr = malloc(128);
+ ptr = realloc(ptr, 256);
+ ptr[128] = 0;
+ ptr[255] = 0;
+ free(ptr);
+
+ ptr = malloc(128);
+ ptr = realloc(ptr, 64);
+ ptr[63] = 0;
+ ptr[0] = 0;
+ free(ptr);
+}
+TEST_END
+
+/* In-bounds writes after growing/shrinking via rallocx() trip no check. */
+TEST_BEGIN(test_rallocx_no_overflow) {
+ char* ptr = malloc(128);
+ ptr = rallocx(ptr, 256, 0);
+ ptr[128] = 0;
+ ptr[255] = 0;
+ free(ptr);
+
+ ptr = malloc(128);
+ ptr = rallocx(ptr, 64, 0);
+ ptr[63] = 0;
+ ptr[0] = 0;
+ free(ptr);
+}
+TEST_END
+
+/* Run the redzone/safety-check unit tests. */
+int
+main(void) {
+ return test(
+ test_malloc_free_overflow,
+ test_mallocx_dallocx_overflow,
+ test_malloc_sdallocx_overflow,
+ test_realloc_overflow,
+ test_rallocx_overflow,
+ test_xallocx_overflow,
+ test_realloc_no_overflow,
+ test_rallocx_no_overflow);
+}
diff --git a/deps/jemalloc/test/unit/safety_check.sh b/deps/jemalloc/test/unit/safety_check.sh
new file mode 100644
index 0000000..8fcc7d8
--- /dev/null
+++ b/deps/jemalloc/test/unit/safety_check.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# When profiling was compiled in, sample every allocation so the safety-check
+# tests exercise the redzone metadata on every object.
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/sc.c b/deps/jemalloc/test/unit/sc.c
new file mode 100644
index 0000000..bf51d8e
--- /dev/null
+++ b/deps/jemalloc/test/unit/sc.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * sc_data_update_slab_size() must clamp page counts that exceed what the
+ * slab bitmap can address, and must apply a valid page-count hint only to
+ * size classes whose regions actually fit in it.
+ */
+TEST_BEGIN(test_update_slab_size) {
+ sc_data_t data;
+ memset(&data, 0, sizeof(data));
+ sc_data_init(&data);
+ sc_t *tiny = &data.sc[0];
+ size_t tiny_size = (ZU(1) << tiny->lg_base)
+ + (ZU(tiny->ndelta) << tiny->lg_delta);
+ size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
+ sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
+ assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
+
+ sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
+ for (int i = 0; i < data.nbins; i++) {
+ sc_t *sc = &data.sc[i];
+ size_t reg_size = (ZU(1) << sc->lg_base)
+ + (ZU(sc->ndelta) << sc->lg_delta);
+ if (reg_size <= PAGE) {
+ assert_d_eq(sc->pgs, 1, "Ignored valid page size hint");
+ } else {
+ assert_d_gt(sc->pgs, 1,
+ "Allowed invalid page size hint");
+ }
+ }
+}
+TEST_END
+
+/* Run the size-class data tests. */
+int
+main(void) {
+ return test(
+ test_update_slab_size);
+}
diff --git a/deps/jemalloc/test/unit/seq.c b/deps/jemalloc/test/unit/seq.c
new file mode 100644
index 0000000..19613b0
--- /dev/null
+++ b/deps/jemalloc/test/unit/seq.c
@@ -0,0 +1,95 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/seq.h"
+
+typedef struct data_s data_t;
+struct data_s {
+ int arr[10];
+};
+
+/* Fill every element of data->arr with num. */
+static void
+set_data(data_t *data, int num) {
+ int idx = 0;
+ while (idx < 10) {
+ data->arr[idx] = num;
+ idx++;
+ }
+}
+
+/* Assert that all ten elements of data->arr hold the same value. */
+static void
+assert_data(data_t *data) {
+ int expected = data->arr[0];
+ for (int idx = 0; idx < 10; idx++) {
+ assert_d_eq(expected, data->arr[idx], "Data consistency error");
+ }
+}
+
+seq_define(data_t, data)
+
+typedef struct thd_data_s thd_data_t;
+struct thd_data_s {
+ seq_data_t data;
+};
+
+/*
+ * Reader thread: spin on optimistic loads until the writer's final value
+ * (999999) is observed. Every successful load must be internally consistent
+ * and monotonically non-decreasing across reads.
+ */
+static void *
+seq_reader_thd(void *arg) {
+ thd_data_t *thd_data = (thd_data_t *)arg;
+ int iter = 0;
+ data_t local_data;
+ while (iter < 1000 * 1000 - 1) {
+ bool success = seq_try_load_data(&local_data, &thd_data->data);
+ if (success) {
+ assert_data(&local_data);
+ assert_d_le(iter, local_data.arr[0],
+ "Seq read went back in time.");
+ iter = local_data.arr[0];
+ }
+ }
+ return NULL;
+}
+
+/* Writer thread: publish one million monotonically increasing snapshots. */
+static void *
+seq_writer_thd(void *arg) {
+ thd_data_t *thd_data = (thd_data_t *)arg;
+ data_t local_data;
+ memset(&local_data, 0, sizeof(local_data));
+ for (int i = 0; i < 1000 * 1000; i++) {
+ set_data(&local_data, i);
+ seq_store_data(&thd_data->data, &local_data);
+ }
+ return NULL;
+}
+
+/* Race one reader against one writer on a shared seq-protected datum. */
+TEST_BEGIN(test_seq_threaded) {
+ thd_data_t thd_data;
+ memset(&thd_data, 0, sizeof(thd_data));
+
+ thd_t reader;
+ thd_t writer;
+
+ thd_create(&reader, seq_reader_thd, &thd_data);
+ thd_create(&writer, seq_writer_thd, &thd_data);
+
+ thd_join(reader, NULL);
+ thd_join(writer, NULL);
+}
+TEST_END
+
+/* Non-racing store/load round trips must always succeed and be consistent. */
+TEST_BEGIN(test_seq_simple) {
+ data_t data;
+ seq_data_t seq;
+ memset(&seq, 0, sizeof(seq));
+ for (int i = 0; i < 1000 * 1000; i++) {
+ set_data(&data, i);
+ seq_store_data(&seq, &data);
+ set_data(&data, 0);
+ bool success = seq_try_load_data(&data, &seq);
+ assert_b_eq(success, true, "Failed non-racing read");
+ assert_data(&data);
+ }
+}
+TEST_END
+
+/* Run the seqlock tests (without the harness's reentrancy wrapper). */
+int main(void) {
+ return test_no_reentrancy(
+ test_seq_simple,
+ test_seq_threaded);
+}
diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c
new file mode 100644
index 0000000..6947336
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_classes.c
@@ -0,0 +1,188 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Return the largest supported size class, read from the last entry of the
+ * arenas.lextent.<i>.size mallctl namespace.
+ */
+static size_t
+get_max_size_class(void) {
+ unsigned nlextents;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nlextents - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib() error");
+
+ return max_size_class;
+}
+
+/*
+ * Walk every size class, verifying that sz_size2index()/sz_index2size() are
+ * mutual inverses and that sz_s2u() rounds up to exactly the next class.
+ * The trailing asserts re-check the final (largest) class using the loop's
+ * post-exit index/size_class values.
+ */
+TEST_BEGIN(test_size_classes) {
+ size_t size_class, max_size_class;
+ szind_t index, max_index;
+
+ max_size_class = get_max_size_class();
+ max_index = sz_size2index(max_size_class);
+
+ for (index = 0, size_class = sz_index2size(index); index < max_index ||
+ size_class < max_size_class; index++, size_class =
+ sz_index2size(index)) {
+ assert_true(index < max_index,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+ assert_true(size_class < max_size_class,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+
+ assert_u_eq(index, sz_size2index(size_class),
+ "sz_size2index() does not reverse sz_index2size(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));
+ assert_zu_eq(size_class,
+ sz_index2size(sz_size2index(size_class)),
+ "sz_index2size() does not reverse sz_size2index(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));
+
+ assert_u_eq(index+1, sz_size2index(size_class+1),
+ "Next size_class does not round up properly");
+
+ assert_zu_eq(size_class, (index > 0) ?
+ sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
+ assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
+ "sz_s2u() does not round up to next size class");
+ }
+
+ assert_u_eq(index, sz_size2index(sz_index2size(index)),
+ "sz_size2index() does not reverse sz_index2size()");
+ assert_zu_eq(max_size_class, sz_index2size(
+ sz_size2index(max_size_class)),
+ "sz_index2size() does not reverse sz_size2index()");
+
+ assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
+}
+TEST_END
+
+/*
+ * Same consistency checks as test_size_classes, but for the page-size-class
+ * functions sz_psz2ind()/sz_pind2sz()/sz_psz2u().
+ */
+TEST_BEGIN(test_psize_classes) {
+ size_t size_class, max_psz;
+ pszind_t pind, max_pind;
+
+ max_psz = get_max_size_class() + PAGE;
+ max_pind = sz_psz2ind(max_psz);
+
+ for (pind = 0, size_class = sz_pind2sz(pind);
+ pind < max_pind || size_class < max_psz;
+ pind++, size_class = sz_pind2sz(pind)) {
+ assert_true(pind < max_pind,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+ assert_true(size_class < max_psz,
+ "Loop conditionals should be equivalent; pind=%u, "
+ "size_class=%zu (%#zx)", pind, size_class, size_class);
+
+ assert_u_eq(pind, sz_psz2ind(size_class),
+ "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));
+ assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
+ "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
+ " size_class=%zu --> pind=%u --> size_class=%zu", pind,
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));
+
+ if (size_class == SC_LARGE_MAXCLASS) {
+ assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
+ "Next size_class does not round up properly");
+ } else {
+ assert_u_eq(pind + 1, sz_psz2ind(size_class + 1),
+ "Next size_class does not round up properly");
+ }
+
+ assert_zu_eq(size_class, (pind > 0) ?
+ sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
+ assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
+ "sz_psz2u() does not round up to next size class");
+ }
+
+ assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
+ "sz_psz2ind() does not reverse sz_pind2sz()");
+ assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
+ "sz_pind2sz() does not reverse sz_psz2ind()");
+
+ assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
+}
+TEST_END
+
+/*
+ * Sizes past the largest class must saturate: indices to NSIZES/NPSIZES,
+ * sz_s2u() to 0, and sz_psz2u() to max_psz.
+ */
+TEST_BEGIN(test_overflow) {
+ size_t max_size_class, max_psz;
+
+ max_size_class = get_max_size_class();
+ max_psz = max_size_class + PAGE;
+
+ assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+
+ assert_zu_eq(sz_s2u(max_size_class+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
+ "sz_s2u() should return 0 on overflow");
+
+ assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+
+ assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
+ " size");
+ assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
+ "size");
+ assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
+}
+TEST_END
+
+/* Run the size-class conversion tests. */
+int
+main(void) {
+ return test(
+ test_size_classes,
+ test_psize_classes,
+ test_overflow);
+}
diff --git a/deps/jemalloc/test/unit/slab.c b/deps/jemalloc/test/unit/slab.c
new file mode 100644
index 0000000..c56af25
--- /dev/null
+++ b/deps/jemalloc/test/unit/slab.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_arena_slab_regind) {
+ szind_t binind;
+
+ for (binind = 0; binind < SC_NBINS; binind++) {
+ size_t regind;
+ extent_t slab;
+ const bin_info_t *bin_info = &bin_infos[binind];
+ extent_init(&slab, NULL, mallocx(bin_info->slab_size,
+ MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
+ binind, 0, extent_state_active, false, true, true,
+ EXTENT_NOT_HEAD);
+ assert_ptr_not_null(extent_addr_get(&slab),
+ "Unexpected malloc() failure");
+ for (regind = 0; regind < bin_info->nregs; regind++) {
+ void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
+ (bin_info->reg_size * regind));
+ assert_zu_eq(arena_slab_regind(&slab, binind, reg),
+ regind,
+ "Incorrect region index computed for size %zu",
+ bin_info->reg_size);
+ }
+ free(extent_addr_get(&slab));
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_arena_slab_regind);
+}
diff --git a/deps/jemalloc/test/unit/smoothstep.c b/deps/jemalloc/test/unit/smoothstep.c
new file mode 100644
index 0000000..7c5dbb7
--- /dev/null
+++ b/deps/jemalloc/test/unit/smoothstep.c
@@ -0,0 +1,102 @@
+#include "test/jemalloc_test.h"
+
+static const uint64_t smoothstep_tab[] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
+
+TEST_BEGIN(test_smoothstep_integral) {
+ uint64_t sum, min, max;
+ unsigned i;
+
+ /*
+ * The integral of smoothstep in the [0..1] range equals 1/2. Verify
+ * that the fixed point representation's integral is no more than
+ * rounding error distant from 1/2. Regarding rounding, each table
+ * element is rounded down to the nearest fixed point value, so the
+ * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
+ */
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += smoothstep_tab[i];
+ }
+
+ max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
+ min = max - SMOOTHSTEP_NSTEPS;
+
+ assert_u64_ge(sum, min,
+ "Integral too small, even accounting for truncation");
+ assert_u64_le(sum, max, "Integral exceeds 1/2");
+ if (false) {
+ malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
+ max - sum, SMOOTHSTEP_NSTEPS);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_monotonic) {
+ uint64_t prev_h;
+ unsigned i;
+
+ /*
+ * The smoothstep function is monotonic in [0..1], i.e. its slope is
+ * non-negative. In practice we want to parametrize table generation
+ * such that piecewise slope is greater than zero, but do not require
+ * that here.
+ */
+ prev_h = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ uint64_t h = smoothstep_tab[i];
+ assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
+ prev_h = h;
+ }
+ assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
+ (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
+}
+TEST_END
+
+TEST_BEGIN(test_smoothstep_slope) {
+ uint64_t prev_h, prev_delta;
+ unsigned i;
+
+ /*
+ * The smoothstep slope strictly increases until x=0.5, and then
+ * strictly decreases until x=1.0. Verify the slightly weaker
+ * requirement of monotonicity, so that inadequate table precision does
+ * not cause false test failures.
+ */
+ prev_h = 0;
+ prev_delta = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = h - prev_h;
+ assert_u64_ge(delta, prev_delta,
+ "Slope must monotonically increase in 0.0 <= x <= 0.5, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+
+ prev_h = KQU(1) << SMOOTHSTEP_BFP;
+ prev_delta = 0;
+ for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
+ uint64_t h = smoothstep_tab[i];
+ uint64_t delta = prev_h - h;
+ assert_u64_ge(delta, prev_delta,
+ "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
+ "i=%u", i);
+ prev_h = h;
+ prev_delta = delta;
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_smoothstep_integral,
+ test_smoothstep_monotonic,
+ test_smoothstep_slope);
+}
diff --git a/deps/jemalloc/test/unit/spin.c b/deps/jemalloc/test/unit/spin.c
new file mode 100644
index 0000000..b965f74
--- /dev/null
+++ b/deps/jemalloc/test/unit/spin.c
@@ -0,0 +1,18 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/spin.h"
+
+TEST_BEGIN(test_spin) {
+ spin_t spinner = SPIN_INITIALIZER;
+
+ for (unsigned i = 0; i < 100; i++) {
+ spin_adaptive(&spinner);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_spin);
+}
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
new file mode 100644
index 0000000..646768e
--- /dev/null
+++ b/deps/jemalloc/test/unit/stats.c
@@ -0,0 +1,374 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_stats_summary) {
+ size_t sz, allocated, active, resident, mapped;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_le(allocated, active,
+ "allocated should be no larger than active");
+ assert_zu_lt(active, resident,
+ "active should be less than resident");
+ assert_zu_lt(active, mapped,
+ "active should be less than mapped");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_large) {
+ void *p;
+ uint64_t epoch;
+ size_t allocated;
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
+ (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_le(nmalloc, nrequests,
+ "nmalloc should no larger than nrequests");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_summary) {
+ void *little, *large;
+ uint64_t epoch;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+ size_t mapped;
+ uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
+ uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
+
+ little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
+ assert_ptr_not_null(little, "Unexpected mallocx() failure");
+ large = mallocx((1U << SC_LG_LARGE_MINCLASS),
+ MALLOCX_ARENA(0));
+ assert_ptr_not_null(large, "Unexpected mallocx() failure");
+
+ dallocx(little, 0);
+ dallocx(large, 0);
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
+ 0), expected, "Unexepected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.dirty_npurge",
+ (void *)&dirty_npurge, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
+ (void *)&dirty_nmadvise, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.dirty_purged",
+ (void *)&dirty_purged, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
+ (void *)&muzzy_npurge, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
+ (void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.muzzy_purged",
+ (void *)&muzzy_purged, &sz, NULL, 0), expected,
+ "Unexepected mallctl() result");
+
+ if (config_stats) {
+ if (!background_thread_enabled()) {
+ assert_u64_gt(dirty_npurge + muzzy_npurge, 0,
+ "At least one purge should have occurred");
+ }
+ assert_u64_le(dirty_nmadvise, dirty_purged,
+ "dirty_nmadvise should be no greater than dirty_purged");
+ assert_u64_le(muzzy_nmadvise, muzzy_purged,
+ "muzzy_nmadvise should be no greater than muzzy_purged");
+ }
+}
+TEST_END
+
+void *
+thd_start(void *arg) {
+ return NULL;
+}
+
+static void
+no_lazy_lock(void) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small) {
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
+
+ p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
+ (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be no greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large) {
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.allocated",
+ (void *)&allocated, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static void
+gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
+ sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name);
+}
+
+TEST_BEGIN(test_stats_arenas_bins) {
+ void *p;
+ size_t sz, curslabs, curregs, nonfull_slabs;
+ uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nslabs, nreslabs;
+ int expected = config_stats ? 0 : ENOENT;
+
+ /* Make sure allocation below isn't satisfied by tcache. */
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ unsigned arena_ind, old_arena_ind;
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Arena creation failure");
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+
+ p = malloc(bin_infos[0].reg_size);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ char cmd[128];
+ sz = sizeof(uint64_t);
+ gen_mallctl_str(cmd, "nmalloc", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "ndalloc", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nrequests", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ gen_mallctl_str(cmd, "curregs", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ sz = sizeof(uint64_t);
+ gen_mallctl_str(cmd, "nfills", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nflushes", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ gen_mallctl_str(cmd, "nslabs", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nreslabs", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ gen_mallctl_str(cmd, "curslabs", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
+ assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ assert_zu_gt(curregs, 0,
+ "allocated should be greater than zero");
+ if (opt_tcache) {
+ assert_u64_gt(nfills, 0,
+ "At least one fill should have occurred");
+ assert_u64_gt(nflushes, 0,
+ "At least one flush should have occurred");
+ }
+ assert_u64_gt(nslabs, 0,
+ "At least one slab should have been allocated");
+ assert_zu_gt(curslabs, 0,
+ "At least one slab should be currently allocated");
+ assert_zu_eq(nonfull_slabs, 0,
+ "slabs_nonfull should be empty");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lextents) {
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc;
+ size_t curlextents, sz, hsize;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ p = mallocx(hsize, MALLOCX_ARENA(0));
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
+ (void *)&nmalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
+ (void *)&ndalloc, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
+ (void *)&curlextents, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(curlextents, 0,
+ "At least one extent should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_stats_summary,
+ test_stats_large,
+ test_stats_arenas_summary,
+ test_stats_arenas_small,
+ test_stats_arenas_large,
+ test_stats_arenas_bins,
+ test_stats_arenas_lextents);
+}
diff --git a/deps/jemalloc/test/unit/stats_print.c b/deps/jemalloc/test/unit/stats_print.c
new file mode 100644
index 0000000..014d002
--- /dev/null
+++ b/deps/jemalloc/test/unit/stats_print.c
@@ -0,0 +1,999 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/util.h"
+
+typedef enum {
+ TOKEN_TYPE_NONE,
+ TOKEN_TYPE_ERROR,
+ TOKEN_TYPE_EOI,
+ TOKEN_TYPE_NULL,
+ TOKEN_TYPE_FALSE,
+ TOKEN_TYPE_TRUE,
+ TOKEN_TYPE_LBRACKET,
+ TOKEN_TYPE_RBRACKET,
+ TOKEN_TYPE_LBRACE,
+ TOKEN_TYPE_RBRACE,
+ TOKEN_TYPE_COLON,
+ TOKEN_TYPE_COMMA,
+ TOKEN_TYPE_STRING,
+ TOKEN_TYPE_NUMBER
+} token_type_t;
+
+typedef struct parser_s parser_t;
+typedef struct {
+ parser_t *parser;
+ token_type_t token_type;
+ size_t pos;
+ size_t len;
+ size_t line;
+ size_t col;
+} token_t;
+
+struct parser_s {
+ bool verbose;
+ char *buf; /* '\0'-terminated. */
+ size_t len; /* Number of characters preceding '\0' in buf. */
+ size_t pos;
+ size_t line;
+ size_t col;
+ token_t token;
+};
+
+static void
+token_init(token_t *token, parser_t *parser, token_type_t token_type,
+ size_t pos, size_t len, size_t line, size_t col) {
+ token->parser = parser;
+ token->token_type = token_type;
+ token->pos = pos;
+ token->len = len;
+ token->line = line;
+ token->col = col;
+}
+
+static void
+token_error(token_t *token) {
+ if (!token->parser->verbose) {
+ return;
+ }
+ switch (token->token_type) {
+ case TOKEN_TYPE_NONE:
+ not_reached();
+ case TOKEN_TYPE_ERROR:
+ malloc_printf("%zu:%zu: Unexpected character in token: ",
+ token->line, token->col);
+ break;
+ default:
+ malloc_printf("%zu:%zu: Unexpected token: ", token->line,
+ token->col);
+ break;
+ }
+ UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO,
+ &token->parser->buf[token->pos], token->len);
+ malloc_printf("\n");
+}
+
+static void
+parser_init(parser_t *parser, bool verbose) {
+ parser->verbose = verbose;
+ parser->buf = NULL;
+ parser->len = 0;
+ parser->pos = 0;
+ parser->line = 1;
+ parser->col = 0;
+}
+
+static void
+parser_fini(parser_t *parser) {
+ if (parser->buf != NULL) {
+ dallocx(parser->buf, MALLOCX_TCACHE_NONE);
+ }
+}
+
+static bool
+parser_append(parser_t *parser, const char *str) {
+ size_t len = strlen(str);
+ char *buf = (parser->buf == NULL) ? mallocx(len + 1,
+ MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
+ MALLOCX_TCACHE_NONE);
+ if (buf == NULL) {
+ return true;
+ }
+ memcpy(&buf[parser->len], str, len + 1);
+ parser->buf = buf;
+ parser->len += len;
+ return false;
+}
+
+static bool
+parser_tokenize(parser_t *parser) {
+ enum {
+ STATE_START,
+ STATE_EOI,
+ STATE_N, STATE_NU, STATE_NUL, STATE_NULL,
+ STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE,
+ STATE_T, STATE_TR, STATE_TRU, STATE_TRUE,
+ STATE_LBRACKET,
+ STATE_RBRACKET,
+ STATE_LBRACE,
+ STATE_RBRACE,
+ STATE_COLON,
+ STATE_COMMA,
+ STATE_CHARS,
+ STATE_CHAR_ESCAPE,
+ STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD,
+ STATE_STRING,
+ STATE_MINUS,
+ STATE_LEADING_ZERO,
+ STATE_DIGITS,
+ STATE_DECIMAL,
+ STATE_FRAC_DIGITS,
+ STATE_EXP,
+ STATE_EXP_SIGN,
+ STATE_EXP_DIGITS,
+ STATE_ACCEPT
+ } state = STATE_START;
+ size_t token_pos JEMALLOC_CC_SILENCE_INIT(0);
+ size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
+ size_t token_col JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert_zu_le(parser->pos, parser->len,
+ "Position is past end of buffer");
+
+ while (state != STATE_ACCEPT) {
+ char c = parser->buf[parser->pos];
+
+ switch (state) {
+ case STATE_START:
+ token_pos = parser->pos;
+ token_line = parser->line;
+ token_col = parser->col;
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ break;
+ case '\0':
+ state = STATE_EOI;
+ break;
+ case 'n':
+ state = STATE_N;
+ break;
+ case 'f':
+ state = STATE_F;
+ break;
+ case 't':
+ state = STATE_T;
+ break;
+ case '[':
+ state = STATE_LBRACKET;
+ break;
+ case ']':
+ state = STATE_RBRACKET;
+ break;
+ case '{':
+ state = STATE_LBRACE;
+ break;
+ case '}':
+ state = STATE_RBRACE;
+ break;
+ case ':':
+ state = STATE_COLON;
+ break;
+ case ',':
+ state = STATE_COMMA;
+ break;
+ case '"':
+ state = STATE_CHARS;
+ break;
+ case '-':
+ state = STATE_MINUS;
+ break;
+ case '0':
+ state = STATE_LEADING_ZERO;
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EOI:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_EOI, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_N:
+ switch (c) {
+ case 'u':
+ state = STATE_NU;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NU:
+ switch (c) {
+ case 'l':
+ state = STATE_NUL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NUL:
+ switch (c) {
+ case 'l':
+ state = STATE_NULL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_NULL:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser, TOKEN_TYPE_NULL,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_F:
+ switch (c) {
+ case 'a':
+ state = STATE_FA;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FA:
+ switch (c) {
+ case 'l':
+ state = STATE_FAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FAL:
+ switch (c) {
+ case 's':
+ state = STATE_FALS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FALS:
+ switch (c) {
+ case 'e':
+ state = STATE_FALSE;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FALSE:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_FALSE, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_T:
+ switch (c) {
+ case 'r':
+ state = STATE_TR;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TR:
+ switch (c) {
+ case 'u':
+ state = STATE_TRU;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TRU:
+ switch (c) {
+ case 'e':
+ state = STATE_TRUE;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_TRUE:
+ switch (c) {
+ case ' ': case '\b': case '\n': case '\r': case '\t':
+ case '\0':
+ case '[': case ']': case '{': case '}': case ':':
+ case ',':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ token_init(&parser->token, parser, TOKEN_TYPE_TRUE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_LBRACKET:
+ token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_RBRACKET:
+ token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_LBRACE:
+ token_init(&parser->token, parser, TOKEN_TYPE_LBRACE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_RBRACE:
+ token_init(&parser->token, parser, TOKEN_TYPE_RBRACE,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_COLON:
+ token_init(&parser->token, parser, TOKEN_TYPE_COLON,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_COMMA:
+ token_init(&parser->token, parser, TOKEN_TYPE_COMMA,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_CHARS:
+ switch (c) {
+ case '\\':
+ state = STATE_CHAR_ESCAPE;
+ break;
+ case '"':
+ state = STATE_STRING;
+ break;
+ case 0x00: case 0x01: case 0x02: case 0x03: case 0x04:
+ case 0x05: case 0x06: case 0x07: case 0x08: case 0x09:
+ case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
+ case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13:
+ case 0x14: case 0x15: case 0x16: case 0x17: case 0x18:
+ case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d:
+ case 0x1e: case 0x1f:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ default:
+ break;
+ }
+ break;
+ case STATE_CHAR_ESCAPE:
+ switch (c) {
+ case '"': case '\\': case '/': case 'b': case 'n':
+ case 'r': case 't':
+ state = STATE_CHARS;
+ break;
+ case 'u':
+ state = STATE_CHAR_U;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_U:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UDD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UDD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHAR_UDDD;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_CHAR_UDDD:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ state = STATE_CHARS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_STRING:
+ token_init(&parser->token, parser, TOKEN_TYPE_STRING,
+ token_pos, parser->pos - token_pos, token_line,
+ token_col);
+ state = STATE_ACCEPT;
+ break;
+ case STATE_MINUS:
+ switch (c) {
+ case '0':
+ state = STATE_LEADING_ZERO;
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_LEADING_ZERO:
+ switch (c) {
+ case '.':
+ state = STATE_DECIMAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ case '.':
+ state = STATE_DECIMAL;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_DECIMAL:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_FRAC_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_FRAC_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ case 'e': case 'E':
+ state = STATE_EXP;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ case STATE_EXP:
+ switch (c) {
+ case '-': case '+':
+ state = STATE_EXP_SIGN;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_EXP_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EXP_SIGN:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ state = STATE_EXP_DIGITS;
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
+ - token_pos, token_line, token_col);
+ return true;
+ }
+ break;
+ case STATE_EXP_DIGITS:
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ break;
+ default:
+ token_init(&parser->token, parser,
+ TOKEN_TYPE_NUMBER, token_pos, parser->pos -
+ token_pos, token_line, token_col);
+ state = STATE_ACCEPT;
+ break;
+ }
+ break;
+ default:
+ not_reached();
+ }
+
+ if (state != STATE_ACCEPT) {
+ if (c == '\n') {
+ parser->line++;
+ parser->col = 0;
+ } else {
+ parser->col++;
+ }
+ parser->pos++;
+ }
+ }
+ return false;
+}
+
+static bool parser_parse_array(parser_t *parser);
+static bool parser_parse_object(parser_t *parser);
+
+/*
+ * Parse one JSON value starting at the current token.  Scalar tokens are
+ * accepted as-is; '{' and '[' recurse into object/array parsing.  Returns
+ * true on parse error, false on success.
+ */
+static bool
+parser_parse_value(parser_t *parser) {
+	switch (parser->token.token_type) {
+	case TOKEN_TYPE_NULL:
+	case TOKEN_TYPE_FALSE:
+	case TOKEN_TYPE_TRUE:
+	case TOKEN_TYPE_STRING:
+	case TOKEN_TYPE_NUMBER:
+		return false;
+	case TOKEN_TYPE_LBRACE:
+		return parser_parse_object(parser);
+	case TOKEN_TYPE_LBRACKET:
+		return parser_parse_array(parser);
+	default:
+		return true;
+	}
+	not_reached();
+}
+
+/*
+ * Parse a "key": value pair.  The current token must already be the key
+ * string (asserted).  Returns true on parse error.
+ */
+static bool
+parser_parse_pair(parser_t *parser) {
+	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+	    "Pair should start with string");
+	if (parser_tokenize(parser)) {
+		return true;
+	}
+	switch (parser->token.token_type) {
+	case TOKEN_TYPE_COLON:
+		if (parser_tokenize(parser)) {
+			return true;
+		}
+		return parser_parse_value(parser);
+	default:
+		return true;
+	}
+}
+
+/*
+ * Parse a comma-separated list of array elements, consuming the closing
+ * ']'.  Called with the current token on the first element.  Returns true
+ * on parse error.
+ */
+static bool
+parser_parse_values(parser_t *parser) {
+	if (parser_parse_value(parser)) {
+		return true;
+	}
+
+	while (true) {
+		if (parser_tokenize(parser)) {
+			return true;
+		}
+		switch (parser->token.token_type) {
+		case TOKEN_TYPE_COMMA:
+			if (parser_tokenize(parser)) {
+				return true;
+			}
+			if (parser_parse_value(parser)) {
+				return true;
+			}
+			break;
+		case TOKEN_TYPE_RBRACKET:
+			/* End of array. */
+			return false;
+		default:
+			return true;
+		}
+	}
+}
+
+/*
+ * Parse a JSON array.  The current token must be '[' (asserted); handles
+ * the empty-array case directly, otherwise defers to parser_parse_values().
+ * Returns true on parse error.
+ */
+static bool
+parser_parse_array(parser_t *parser) {
+	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
+	    "Array should start with [");
+	if (parser_tokenize(parser)) {
+		return true;
+	}
+	switch (parser->token.token_type) {
+	case TOKEN_TYPE_RBRACKET:
+		return false;
+	default:
+		return parser_parse_values(parser);
+	}
+	not_reached();
+}
+
+/*
+ * Parse a comma-separated list of "key": value pairs, consuming the
+ * closing '}'.  Called with the current token on the first key string
+ * (asserted).  Returns true on parse error.
+ */
+static bool
+parser_parse_pairs(parser_t *parser) {
+	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+	    "Object should start with string");
+	if (parser_parse_pair(parser)) {
+		return true;
+	}
+
+	while (true) {
+		if (parser_tokenize(parser)) {
+			return true;
+		}
+		switch (parser->token.token_type) {
+		case TOKEN_TYPE_COMMA:
+			/* After a comma the next pair must begin with a key. */
+			if (parser_tokenize(parser)) {
+				return true;
+			}
+			switch (parser->token.token_type) {
+			case TOKEN_TYPE_STRING:
+				if (parser_parse_pair(parser)) {
+					return true;
+				}
+				break;
+			default:
+				return true;
+			}
+			break;
+		case TOKEN_TYPE_RBRACE:
+			/* End of object. */
+			return false;
+		default:
+			return true;
+		}
+	}
+}
+
+/*
+ * Parse a JSON object.  The current token must be '{' (asserted); handles
+ * the empty-object case directly, otherwise defers to parser_parse_pairs().
+ * Returns true on parse error.
+ */
+static bool
+parser_parse_object(parser_t *parser) {
+	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
+	    "Object should start with {");
+	if (parser_tokenize(parser)) {
+		return true;
+	}
+	switch (parser->token.token_type) {
+	case TOKEN_TYPE_STRING:
+		return parser_parse_pairs(parser);
+	case TOKEN_TYPE_RBRACE:
+		return false;
+	default:
+		return true;
+	}
+	not_reached();
+}
+
+/*
+ * Top-level parse driver: parse exactly one JSON value followed by
+ * end-of-input.  On any error, report the offending token's position via
+ * token_error() and return true.
+ */
+static bool
+parser_parse(parser_t *parser) {
+	if (parser_tokenize(parser)) {
+		goto label_error;
+	}
+	if (parser_parse_value(parser)) {
+		goto label_error;
+	}
+
+	/* Trailing garbage after the value is an error. */
+	if (parser_tokenize(parser)) {
+		goto label_error;
+	}
+	switch (parser->token.token_type) {
+	case TOKEN_TYPE_EOI:
+		return false;
+	default:
+		goto label_error;
+	}
+	not_reached();
+
+label_error:
+	token_error(&parser->token);
+	return true;
+}
+
+/*
+ * Exercise the JSON parser directly on hand-written inputs: every
+ * tokenizer/parser error path should fail, and all well-formed JSON
+ * (including escapes and nesting) should succeed.
+ */
+TEST_BEGIN(test_json_parser) {
+	size_t i;
+	const char *invalid_inputs[] = {
+		/* Tokenizer error case tests. */
+		"{ \"string\": X }",
+		"{ \"string\": nXll }",
+		"{ \"string\": nuXl }",
+		"{ \"string\": nulX }",
+		"{ \"string\": nullX }",
+		"{ \"string\": fXlse }",
+		"{ \"string\": faXse }",
+		"{ \"string\": falXe }",
+		"{ \"string\": falsX }",
+		"{ \"string\": falseX }",
+		"{ \"string\": tXue }",
+		"{ \"string\": trXe }",
+		"{ \"string\": truX }",
+		"{ \"string\": trueX }",
+		"{ \"string\": \"\n\" }",
+		"{ \"string\": \"\\z\" }",
+		"{ \"string\": \"\\uX000\" }",
+		"{ \"string\": \"\\u0X00\" }",
+		"{ \"string\": \"\\u00X0\" }",
+		"{ \"string\": \"\\u000X\" }",
+		"{ \"string\": -X }",
+		"{ \"string\": 0.X }",
+		"{ \"string\": 0.0eX }",
+		"{ \"string\": 0.0e+X }",
+
+		/* Parser error test cases. */
+		"{\"string\": }",
+		"{\"string\" }",
+		"{\"string\": [ 0 }",
+		"{\"string\": {\"a\":0, 1 } }",
+		"{\"string\": {\"a\":0: } }",
+		"{",
+		"{}{",
+	};
+	const char *valid_inputs[] = {
+		/* Token tests. */
+		"null",
+		"false",
+		"true",
+		"{}",
+		"{\"a\": 0}",
+		"[]",
+		"[0, 1]",
+		"0",
+		"1",
+		"10",
+		"-10",
+		"10.23",
+		"10.23e4",
+		"10.23e-4",
+		"10.23e+4",
+		"10.23E4",
+		"10.23E-4",
+		"10.23E+4",
+		"-10.23",
+		"-10.23e4",
+		"-10.23e-4",
+		"-10.23e+4",
+		"-10.23E4",
+		"-10.23E-4",
+		"-10.23E+4",
+		"\"value\"",
+		"\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"",
+
+		/* Parser test with various nesting. */
+		"{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}",
+	};
+
+	/* Invalid inputs: parser_parse() must return true (error). */
+	for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) {
+		const char *input = invalid_inputs[i];
+		parser_t parser;
+		parser_init(&parser, false);
+		assert_false(parser_append(&parser, input),
+		    "Unexpected input appending failure");
+		assert_true(parser_parse(&parser),
+		    "Unexpected parse success for input: %s", input);
+		parser_fini(&parser);
+	}
+
+	/* Valid inputs: parser_parse() must return false (success). */
+	for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) {
+		const char *input = valid_inputs[i];
+		parser_t parser;
+		parser_init(&parser, true);
+		assert_false(parser_append(&parser, input),
+		    "Unexpected input appending failure");
+		assert_false(parser_parse(&parser),
+		    "Unexpected parse error for input: %s", input);
+		parser_fini(&parser);
+	}
+}
+TEST_END
+
+/*
+ * malloc_stats_print() write callback: accumulate the emitted stats text
+ * into the parser's input buffer so it can be validated as JSON later.
+ * Made static: it is a file-local helper like every other function here,
+ * and external linkage needlessly pollutes the global namespace.
+ */
+static void
+write_cb(void *opaque, const char *str) {
+	parser_t *parser = (parser_t *)opaque;
+	if (parser_append(parser, str)) {
+		test_fail("Unexpected input appending failure");
+	}
+}
+
+/*
+ * Verify that every JSON-emitting option string for malloc_stats_print()
+ * produces well-formed JSON, under three arena configurations: default,
+ * after creating an extra arena, and after destroying that arena.
+ */
+TEST_BEGIN(test_stats_print_json) {
+	const char *opts[] = {
+		"J",
+		"Jg",
+		"Jm",
+		"Jd",
+		"Jmd",
+		"Jgd",
+		"Jgm",
+		"Jgmd",
+		"Ja",
+		"Jb",
+		"Jl",
+		"Jx",
+		"Jbl",
+		"Jal",
+		"Jab",
+		"Jabl",
+		"Jax",
+		"Jbx",
+		"Jlx",
+		"Jablx",
+		"Jgmdablx",
+	};
+	unsigned arena_ind, i;
+
+	for (i = 0; i < 3; i++) {
+		unsigned j;
+
+		/* i selects the arena configuration under test. */
+		switch (i) {
+		case 0:
+			break;
+		case 1: {
+			/* Create an extra arena; remember its index. */
+			size_t sz = sizeof(arena_ind);
+			assert_d_eq(mallctl("arenas.create", (void *)&arena_ind,
+			    &sz, NULL, 0), 0, "Unexpected mallctl failure");
+			break;
+		} case 2: {
+			/* Destroy the arena created in the previous round. */
+			size_t mib[3];
+			size_t miblen = sizeof(mib)/sizeof(size_t);
+			assert_d_eq(mallctlnametomib("arena.0.destroy",
+			    mib, &miblen), 0,
+			    "Unexpected mallctlnametomib failure");
+			mib[1] = arena_ind;
+			assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
+			    0), 0, "Unexpected mallctlbymib failure");
+			break;
+		} default:
+			not_reached();
+		}
+
+		for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) {
+			parser_t parser;
+
+			parser_init(&parser, true);
+			malloc_stats_print(write_cb, (void *)&parser, opts[j]);
+			assert_false(parser_parse(&parser),
+			    "Unexpected parse error, opts=\"%s\"", opts[j]);
+			parser_fini(&parser);
+		}
+	}
+}
+TEST_END
+
+/* Test driver for the stats-print JSON tests. */
+int
+main(void) {
+	return test(
+	    test_json_parser,
+	    test_stats_print_json);
+}
diff --git a/deps/jemalloc/test/unit/test_hooks.c b/deps/jemalloc/test/unit/test_hooks.c
new file mode 100644
index 0000000..ded8698
--- /dev/null
+++ b/deps/jemalloc/test/unit/test_hooks.c
@@ -0,0 +1,38 @@
+#include "test/jemalloc_test.h"
+
+/* Set by hook() so tests can observe whether the hook fired. */
+static bool hook_called = false;
+
+/*
+ * Fix: declare the parameter list as (void).  An empty list in a C
+ * function definition is an obsolescent unspecified-arguments form, not
+ * a prototype.
+ */
+static void
+hook(void) {
+	hook_called = true;
+}
+
+static int
+func_to_hook(int arg1, int arg2) {
+	return arg1 + arg2;
+}
+
+/* Route subsequent func_to_hook calls through the test hook machinery. */
+#define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook)
+
+TEST_BEGIN(unhooked_call) {
+	/* With the hook nulled, the call must behave as if unhooked. */
+	test_hooks_libc_hook = NULL;
+	hook_called = false;
+	assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+	assert_false(hook_called, "Nulling out hook didn't take.");
+}
+TEST_END
+
+TEST_BEGIN(hooked_call) {
+	/* With the hook installed, it must fire without changing results. */
+	test_hooks_libc_hook = &hook;
+	hook_called = false;
+	assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+	assert_true(hook_called, "Hook should have executed.");
+}
+TEST_END
+
+int
+main(void) {
+	return test(
+	    unhooked_call,
+	    hooked_call);
+}
diff --git a/deps/jemalloc/test/unit/ticker.c b/deps/jemalloc/test/unit/ticker.c
new file mode 100644
index 0000000..e5790a3
--- /dev/null
+++ b/deps/jemalloc/test/unit/ticker.c
@@ -0,0 +1,73 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ticker.h"
+
+/*
+ * Single-step ticking: each rep performs NTICKS non-firing ticks, then one
+ * tick that fires and resets the counter.
+ */
+TEST_BEGIN(test_ticker_tick) {
+#define NREPS 2
+#define NTICKS 3
+	ticker_t ticker;
+	int32_t i, j;
+
+	ticker_init(&ticker, NTICKS);
+	for (i = 0; i < NREPS; i++) {
+		for (j = 0; j < NTICKS; j++) {
+			assert_u_eq(ticker_read(&ticker), NTICKS - j,
+			    "Unexpected ticker value (i=%d, j=%d)", i, j);
+			assert_false(ticker_tick(&ticker),
+			    "Unexpected ticker fire (i=%d, j=%d)", i, j);
+		}
+		assert_u32_eq(ticker_read(&ticker), 0,
+		    "Expected ticker depletion");
+		assert_true(ticker_tick(&ticker),
+		    "Expected ticker fire (i=%d)", i);
+		assert_u32_eq(ticker_read(&ticker), NTICKS,
+		    "Expected ticker reset");
+	}
+/* Fix: NREPS was never undefined, leaking the macro past this test. */
+#undef NREPS
+#undef NTICKS
+}
+TEST_END
+
+/*
+ * Bulk ticking: ticker_ticks() consumes several ticks at once, fires when
+ * the count is exhausted, and resets afterwards; over-consuming also fires.
+ */
+TEST_BEGIN(test_ticker_ticks) {
+#define NTICKS 3
+	ticker_t ticker;
+
+	ticker_init(&ticker, NTICKS);
+
+	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+	assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
+	assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
+	assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
+	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+
+	/* A single over-sized batch fires immediately. */
+	assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
+	assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+/*
+ * ticker_copy() must duplicate the source ticker's remaining count,
+ * whether pristine or partially consumed.
+ */
+TEST_BEGIN(test_ticker_copy) {
+#define NTICKS 3
+	ticker_t ta, tb;
+
+	ticker_init(&ta, NTICKS);
+	ticker_copy(&tb, &ta);
+	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+	assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
+	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+
+	/* Copy after one tick: remaining count carries over. */
+	ticker_tick(&ta);
+	ticker_copy(&tb, &ta);
+	assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
+	assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
+	assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+#undef NTICKS
+}
+TEST_END
+
+/* Test driver for the ticker tests. */
+int
+main(void) {
+	return test(
+	    test_ticker_tick,
+	    test_ticker_ticks,
+	    test_ticker_copy);
+}
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
new file mode 100644
index 0000000..917884d
--- /dev/null
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -0,0 +1,267 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
+ * be asserting that we're on one.
+ */
+static bool originally_fast;
+/* Number of times data_cleanup() has run; checked by test_tsd_sub_thread. */
+static int data_cleanup_count;
+
+/*
+ * TSD destructor for the test data slot.  Deliberately re-arms itself
+ * twice (by setting *data to 1, then 2) so that jemalloc's internal tsd
+ * reinitialization during thread exit gets exercised.
+ */
+void
+data_cleanup(int *data) {
+	if (data_cleanup_count == 0) {
+		assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
+		    "Argument passed into cleanup function should match tsd "
+		    "value");
+	}
+	++data_cleanup_count;
+
+	/*
+	 * Allocate during cleanup for two rounds, in order to assure that
+	 * jemalloc's internal tsd reinitialization happens.
+	 */
+	bool reincarnate = false;
+	switch (*data) {
+	case MALLOC_TSD_TEST_DATA_INIT:
+		*data = 1;
+		reincarnate = true;
+		break;
+	case 1:
+		*data = 2;
+		reincarnate = true;
+		break;
+	case 2:
+		return;
+	default:
+		not_reached();
+	}
+
+	if (reincarnate) {
+		void *p = mallocx(1, 0);
+		assert_ptr_not_null(p, "Unexpeced mallocx() failure");
+		dallocx(p, 0);
+	}
+}
+
+/*
+ * Thread body: verify the tsd test-data slot starts at its init value, can
+ * be set/read back, and is independent of the caller's local copy; then
+ * register data_cleanup as the slot's destructor.
+ */
+static void *
+thd_start(void *arg) {
+	int d = (int)(uintptr_t)arg;
+	void *p;
+
+	tsd_t *tsd = tsd_fetch();
+	assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
+	    "Initial tsd get should return initialization value");
+
+	p = malloc(1);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+	tsd_test_data_set(tsd, d);
+	assert_x_eq(tsd_test_data_get(tsd), d,
+	    "After tsd set, tsd get should return value that was set");
+
+	/* Clobbering the local must not affect the stored tsd value. */
+	d = 0;
+	assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
+	    "Resetting local data should have no effect on tsd");
+
+	tsd_test_callback_set(tsd, &data_cleanup);
+
+	free(p);
+	return NULL;
+}
+
+/* Run the tsd get/set checks on the main thread itself. */
+TEST_BEGIN(test_tsd_main_thread) {
+	thd_start((void *)(uintptr_t)0xa5f3e329);
+}
+TEST_END
+
+/*
+ * Run the tsd checks on a child thread and verify that the cleanup
+ * callback fires (including its two deliberate reincarnations).
+ */
+TEST_BEGIN(test_tsd_sub_thread) {
+	thd_t thd;
+
+	data_cleanup_count = 0;
+	thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
+	thd_join(thd, NULL);
+	/*
+	 * We reincarnate twice in the data cleanup, so it should execute at
+	 * least 3 times.
+	 */
+	assert_x_ge(data_cleanup_count, 3,
+	    "Cleanup function should have executed multiple times.");
+}
+TEST_END
+
+/*
+ * Thread body: manually drive tsd through cleanup -> purgatory ->
+ * reincarnated states, checking that the arena pointer is cleared and
+ * stays cleared across a post-reincarnation allocation.
+ */
+static void *
+thd_start_reincarnated(void *arg) {
+	tsd_t *tsd = tsd_fetch();
+	assert(tsd);
+
+	void *p = malloc(1);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+	/* Manually trigger reincarnation. */
+	assert_ptr_not_null(tsd_arena_get(tsd),
+	    "Should have tsd arena set.");
+	tsd_cleanup((void *)tsd);
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "TSD arena should have been cleared.");
+	assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
+	    "TSD state should be purgatory\n");
+
+	/* Freeing in purgatory pushes the state to reincarnated. */
+	free(p);
+	assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
+	    "TSD state should be reincarnated\n");
+	p = mallocx(1, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "Should not have tsd arena set after reincarnation.");
+
+	free(p);
+	tsd_cleanup((void *)tsd);
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "TSD arena should have been cleared after 2nd cleanup.");
+
+	return NULL;
+}
+
+/* Drive the reincarnation state machine on a dedicated thread. */
+TEST_BEGIN(test_tsd_reincarnation) {
+	thd_t thd;
+	thd_create(&thd, thd_start_reincarnated, NULL);
+	thd_join(thd, NULL);
+}
+TEST_END
+
+/*
+ * Shared state for the global-slow test: phase sequences the two threads
+ * in lockstep, error records a failed check observed on the child thread.
+ */
+typedef struct {
+	atomic_u32_t phase;
+	atomic_b_t error;
+} global_slow_data_t;
+
+/*
+ * Child thread for test_tsd_global_slow.  Alternates with the main thread
+ * through odd/even phases (spin-waiting on data->phase), checking at each
+ * step whether this thread's tsd fast-path state matches the number of
+ * outstanding tsd_global_slow_inc/dec calls made by the main thread.
+ */
+static void *
+thd_start_global_slow(void *arg) {
+	/* PHASE 0 */
+	global_slow_data_t *data = (global_slow_data_t *)arg;
+	free(mallocx(1, 0));
+
+	tsd_t *tsd = tsd_fetch();
+	/*
+	 * No global slowness has happened yet; there was an error if we were
+	 * originally fast but aren't now.
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
+
+	/* PHASE 2 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
+
+	/* PHASE 4 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
+
+	/* PHASE 6 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
+	}
+	free(mallocx(1, 0));
+	/* Only one decrement so far. */
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
+
+	/* PHASE 8 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
+	}
+	free(mallocx(1, 0));
+	/*
+	 * Both decrements happened; we should be fast again (if we ever
+	 * were)
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
+
+	return NULL;
+}
+
+/*
+ * Drive tsd_global_slow_inc/dec from the main thread while a child thread
+ * (thd_start_global_slow) verifies, phase by phase, that its own tsd
+ * fast-path status tracks the global slow count.
+ */
+TEST_BEGIN(test_tsd_global_slow) {
+	global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
+	/*
+	 * Note that the "mallocx" here (vs. malloc) is important, since the
+	 * compiler is allowed to optimize away free(malloc(1)) but not
+	 * free(mallocx(1)).
+	 */
+	free(mallocx(1, 0));
+	tsd_t *tsd = tsd_fetch();
+	originally_fast = tsd_fast(tsd);
+
+	thd_t thd;
+	/*
+	 * Fix: pass the whole struct, not &data.phase.  The thread casts its
+	 * argument to global_slow_data_t * and accesses both members; the
+	 * old &data.phase only worked because phase happens to be the first
+	 * member.
+	 */
+	thd_create(&thd, thd_start_global_slow, (void *)&data);
+	/* PHASE 1 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
+		/*
+		 * We don't have a portable condvar/semaphore mechanism.
+		 * Spin-wait.
+		 */
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+	atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
+
+	/* PHASE 3 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	/* Increase again, so that we can test multiple fast/slow changes. */
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 5 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
+	/* We only decreased once; things should still be slow. */
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 7 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
+	/* We incremented and then decremented twice; we should be fast now. */
+	free(mallocx(1, 0));
+	assert_true(!originally_fast || tsd_fast(tsd), "");
+
+	/* PHASE 9 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+
+	thd_join(thd, NULL);
+}
+TEST_END
+
+/* Test driver for the tsd tests; forces tsd bootstrap before running. */
+int
+main(void) {
+	/* Ensure tsd bootstrapped. */
+	if (nallocx(1, 0) == 0) {
+		malloc_printf("Initialization error");
+		return test_status_fail;
+	}
+
+	return test_no_reentrancy(
+	    test_tsd_main_thread,
+	    test_tsd_sub_thread,
+	    test_tsd_reincarnation,
+	    test_tsd_global_slow);
+}
diff --git a/deps/jemalloc/test/unit/witness.c b/deps/jemalloc/test/unit/witness.c
new file mode 100644
index 0000000..5986da4
--- /dev/null
+++ b/deps/jemalloc/test/unit/witness.c
@@ -0,0 +1,280 @@
+#include "test/jemalloc_test.h"
+
+/* Saved copies of the real witness error handlers, restored after tests. */
+static witness_lock_error_t *witness_lock_error_orig;
+static witness_owner_error_t *witness_owner_error_orig;
+static witness_not_owner_error_t *witness_not_owner_error_orig;
+static witness_depth_error_t *witness_depth_error_orig;
+
+/* Flags set by the intercept handlers below (instead of aborting). */
+static bool saw_lock_error;
+static bool saw_owner_error;
+static bool saw_not_owner_error;
+static bool saw_depth_error;
+
+static void
+witness_lock_error_intercept(const witness_list_t *witnesses,
+    const witness_t *witness) {
+	saw_lock_error = true;
+}
+
+static void
+witness_owner_error_intercept(const witness_t *witness) {
+	saw_owner_error = true;
+}
+
+static void
+witness_not_owner_error_intercept(const witness_t *witness) {
+	saw_not_owner_error = true;
+}
+
+static void
+witness_depth_error_intercept(const witness_list_t *witnesses,
+    witness_rank_t rank_inclusive, unsigned depth) {
+	saw_depth_error = true;
+}
+
+/* Order same-rank witnesses lexically by name (oa/ob are opaque selves). */
+static int
+witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
+	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+	assert(oa == (void *)a);
+	assert(ob == (void *)b);
+
+	return strcmp(a->name, b->name);
+}
+
+/* Reverse lexical order; used to force a comparator conflict in tests. */
+static int
+witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
+    void *ob) {
+	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+	assert(oa == (void *)a);
+	assert(ob == (void *)b);
+
+	return -strcmp(a->name, b->name);
+}
+
+/*
+ * Basic witness bookkeeping: lock/unlock in rank order and check ownership
+ * and depth accounting at each step.  Requires a debug build.
+ */
+TEST_BEGIN(test_witness) {
+	witness_t a, b;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_owner(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0);
+
+	witness_init(&b, "b", 2, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &b);
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_owner(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 2);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+
+	/* Out-of-order unlock is permitted; only acquisition is ranked. */
+	witness_unlock(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+	witness_unlock(&witness_tsdn, &b);
+
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
+}
+TEST_END
+
+/*
+ * Same-rank witnesses are ordered by their comparator: a matching
+ * comparator permits the acquisition, while a conflicting or missing
+ * comparator must raise a lock error (intercepted, not fatal).
+ */
+TEST_BEGIN(test_witness_comp) {
+	witness_t a, b, c, d;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_init(&a, "a", 1, witness_comp, &a);
+	witness_assert_not_owner(&witness_tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_owner(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+
+	/* Same rank, same comparator, ordered by name: allowed. */
+	witness_init(&b, "b", 1, witness_comp, &b);
+	witness_assert_not_owner(&witness_tsdn, &b);
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_owner(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 2);
+	witness_unlock(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 1);
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	/* Mismatched comparator: must trigger a lock error. */
+	witness_init(&c, "c", 1, witness_comp_reverse, &c);
+	witness_assert_not_owner(&witness_tsdn, &c);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	witness_lock(&witness_tsdn, &c);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	witness_unlock(&witness_tsdn, &c);
+	witness_assert_depth(&witness_tsdn, 1);
+
+	saw_lock_error = false;
+
+	/* No comparator at an already-held rank: also a lock error. */
+	witness_init(&d, "d", 1, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &d);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	witness_lock(&witness_tsdn, &d);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	witness_unlock(&witness_tsdn, &d);
+	witness_assert_depth(&witness_tsdn, 1);
+
+	witness_unlock(&witness_tsdn, &a);
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+/*
+ * Acquiring a lower-rank witness while holding a higher-rank one is a
+ * lock-order reversal and must raise a (intercepted) lock error.
+ */
+TEST_BEGIN(test_witness_reversal) {
+	witness_t a, b;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+	witness_init(&b, "b", 2, NULL, NULL);
+
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 1);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	/* Rank 1 after rank 2: reversal. */
+	witness_lock(&witness_tsdn, &a);
+	assert_true(saw_lock_error, "Expected witness lock error");
+
+	witness_unlock(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_unlock(&witness_tsdn, &b);
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+/*
+ * Recursively acquiring an already-owned witness must raise both the
+ * lock error and the not-owner error (both intercepted).
+ */
+TEST_BEGIN(test_witness_recursive) {
+	witness_t a;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_not_owner_error_orig = witness_not_owner_error;
+	witness_not_owner_error = witness_not_owner_error_intercept;
+	saw_not_owner_error = false;
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+
+	witness_lock(&witness_tsdn, &a);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	assert_false(saw_not_owner_error, "Unexpected witness not owner error");
+	witness_lock(&witness_tsdn, &a);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	assert_true(saw_not_owner_error, "Expected witness not owner error");
+
+	witness_unlock(&witness_tsdn, &a);
+
+	witness_assert_lockless(&witness_tsdn);
+
+	/*
+	 * Fix: restore the handlers that were actually overridden above.
+	 * The original code restored witness_owner_error (never modified in
+	 * this test) from its never-saved _orig slot, clobbering the real
+	 * owner-error handler with NULL and leaking the not-owner intercept
+	 * into subsequent tests.
+	 */
+	witness_not_owner_error = witness_not_owner_error_orig;
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+/*
+ * Unlocking a witness that was never locked must raise the (intercepted)
+ * owner error.
+ */
+TEST_BEGIN(test_witness_unlock_not_owned) {
+	witness_t a;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_owner_error_orig = witness_owner_error;
+	witness_owner_error = witness_owner_error_intercept;
+	saw_owner_error = false;
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+
+	assert_false(saw_owner_error, "Unexpected owner error");
+	witness_unlock(&witness_tsdn, &a);
+	assert_true(saw_owner_error, "Expected owner error");
+
+	witness_assert_lockless(&witness_tsdn);
+
+	witness_owner_error = witness_owner_error_orig;
+}
+TEST_END
+
+/*
+ * Depth assertions must raise the (intercepted) depth error when a
+ * witness is held but depth 0 / locklessness is asserted.
+ */
+TEST_BEGIN(test_witness_depth) {
+	witness_t a;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
+
+	test_skip_if(!config_debug);
+
+	witness_depth_error_orig = witness_depth_error;
+	witness_depth_error = witness_depth_error_intercept;
+	saw_depth_error = false;
+
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+
+	assert_false(saw_depth_error, "Unexpected depth error");
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+
+	/* Holding a lock while asserting depth 0 must fire the error. */
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+	assert_true(saw_depth_error, "Expected depth error");
+
+	witness_unlock(&witness_tsdn, &a);
+
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+
+	witness_depth_error = witness_depth_error_orig;
+}
+TEST_END
+
+/* Test driver for the witness tests. */
+int
+main(void) {
+	return test(
+	    test_witness,
+	    test_witness_comp,
+	    test_witness_reversal,
+	    test_witness_recursive,
+	    test_witness_unlock_not_owned,
+	    test_witness_depth);
+}
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
new file mode 100644
index 0000000..271fd5c
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Grow an allocation from sz_min to sz_max (via in-place xallocx when
+ * possible, else rallocx), verifying that every newly exposed byte is
+ * zero-filled and that previously written MAGIC bytes survive resizing.
+ */
+static void
+test_zero(size_t sz_min, size_t sz_max) {
+	uint8_t *s;
+	size_t sz_prev, sz, i;
+#define MAGIC ((uint8_t)0x61)
+
+	sz_prev = 0;
+	s = (uint8_t *)mallocx(sz_min, 0);
+	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+	for (sz = sallocx(s, 0); sz <= sz_max;
+	    sz_prev = sz, sz = sallocx(s, 0)) {
+		if (sz_prev > 0) {
+			/* First and last bytes of the old region intact? */
+			assert_u_eq(s[0], MAGIC,
+			    "Previously allocated byte %zu/%zu is corrupted",
+			    ZU(0), sz_prev);
+			assert_u_eq(s[sz_prev-1], MAGIC,
+			    "Previously allocated byte %zu/%zu is corrupted",
+			    sz_prev-1, sz_prev);
+		}
+
+		/* Newly exposed bytes must be zero; then stamp them. */
+		for (i = sz_prev; i < sz; i++) {
+			assert_u_eq(s[i], 0x0,
+			    "Newly allocated byte %zu/%zu isn't zero-filled",
+			    i, sz);
+			s[i] = MAGIC;
+		}
+
+		/* If in-place growth failed, move to a larger allocation. */
+		if (xallocx(s, sz+1, 0, 0) == sz) {
+			s = (uint8_t *)rallocx(s, sz+1, 0);
+			assert_ptr_not_null((void *)s,
+			    "Unexpected rallocx() failure");
+		}
+	}
+
+	dallocx(s, 0);
+#undef MAGIC
+}
+
+/* Zero-fill check across the small size classes (requires --enable-fill). */
+TEST_BEGIN(test_zero_small) {
+	test_skip_if(!config_fill);
+	test_zero(1, SC_SMALL_MAXCLASS - 1);
+}
+TEST_END
+
+/* Zero-fill check across the large size classes (requires --enable-fill). */
+TEST_BEGIN(test_zero_large) {
+	test_skip_if(!config_fill);
+	test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
+}
+TEST_END
+
+/* Test driver for the zero-fill tests. */
+int
+main(void) {
+	return test(
+	    test_zero_small,
+	    test_zero_large);
+}
diff --git a/deps/jemalloc/test/unit/zero.sh b/deps/jemalloc/test/unit/zero.sh
new file mode 100644
index 0000000..b4540b2
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# Configure the zero-fill test: only meaningful when jemalloc was built
+# with --enable-fill.  Junk filling is disabled so newly allocated bytes
+# stay zeroed; abort:false lets the test observe failures itself.
+if [ "x${enable_fill}" = "x1" ] ; then
+  export MALLOC_CONF="abort:false,junk:false,zero:true"
+fi