Diffstat (limited to 'deps/jemalloc/test/integration')
-rw-r--r--  deps/jemalloc/test/integration/MALLOCX_ARENA.c                66
-rw-r--r--  deps/jemalloc/test/integration/aligned_alloc.c               157
-rw-r--r--  deps/jemalloc/test/integration/allocated.c                   124
-rw-r--r--  deps/jemalloc/test/integration/cpp/basic.cpp                  24
-rw-r--r--  deps/jemalloc/test/integration/cpp/infallible_new_false.cpp   23
-rw-r--r--  deps/jemalloc/test/integration/cpp/infallible_new_false.sh     8
-rw-r--r--  deps/jemalloc/test/integration/cpp/infallible_new_true.cpp    67
-rw-r--r--  deps/jemalloc/test/integration/cpp/infallible_new_true.sh      8
-rw-r--r--  deps/jemalloc/test/integration/extent.c                      287
-rw-r--r--  deps/jemalloc/test/integration/extent.sh                       5
-rw-r--r--  deps/jemalloc/test/integration/malloc.c                       16
-rw-r--r--  deps/jemalloc/test/integration/mallocx.c                     274
-rw-r--r--  deps/jemalloc/test/integration/mallocx.sh                      5
-rw-r--r--  deps/jemalloc/test/integration/overflow.c                     59
-rw-r--r--  deps/jemalloc/test/integration/posix_memalign.c              128
-rw-r--r--  deps/jemalloc/test/integration/rallocx.c                     308
-rw-r--r--  deps/jemalloc/test/integration/sdallocx.c                     55
-rw-r--r--  deps/jemalloc/test/integration/slab_sizes.c                   80
-rw-r--r--  deps/jemalloc/test/integration/slab_sizes.sh                   4
-rw-r--r--  deps/jemalloc/test/integration/smallocx.c                    312
-rw-r--r--  deps/jemalloc/test/integration/smallocx.sh                     5
-rw-r--r--  deps/jemalloc/test/integration/thread_arena.c                 86
-rw-r--r--  deps/jemalloc/test/integration/thread_tcache_enabled.c        87
-rw-r--r--  deps/jemalloc/test/integration/xallocx.c                     384
-rw-r--r--  deps/jemalloc/test/integration/xallocx.sh                      5
25 files changed, 2577 insertions, 0 deletions
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 0000000..7e61df0
--- /dev/null
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,66 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Error in arenas.create");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
+ expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return NULL;
+}
+
+TEST_BEGIN(test_MALLOCX_ARENA) {
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_MALLOCX_ARENA);
+}
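
MALLOCX_ARENA.c above exercises jemalloc's non-standard API: "arenas.create" mints a fresh arena index, and the MALLOCX_ARENA() flag pins an allocation to it. A minimal standalone sketch of the same pattern, assuming jemalloc is linked and its header is on the include path:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        /* Create a fresh arena; jemalloc writes its index into arena_ind. */
        if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
            return 1;
        }
        /* Allocate from that arena, bypassing the thread cache. */
        void *p = mallocx(64, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
        if (p != NULL) {
            printf("allocated from arena %u\n", arena_ind);
            dallocx(p, MALLOCX_TCACHE_NONE);
        }
        return 0;
    }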
diff --git a/deps/jemalloc/test/integration/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c
new file mode 100644
index 0000000..b37d5ba
--- /dev/null
+++ b/deps/jemalloc/test/integration/aligned_alloc.c
@@ -0,0 +1,157 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors) {
+ size_t alignment;
+ void *p;
+
+ alignment = 0;
+ set_errno(0);
+ p = aligned_alloc(alignment, 1);
+ expect_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ set_errno(0);
+ p = aligned_alloc(alignment + 1, 1);
+ expect_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_oom_errors) {
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ set_errno(0);
+ p = aligned_alloc(alignment, size);
+ expect_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ ps[i] = aligned_alloc(alignment, size);
+ if (ps[i] == NULL) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += TEST_MALLOC_SIZE(ps[i]);
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+TEST_BEGIN(test_zero_alloc) {
+ void *res = aligned_alloc(8, 0);
+ assert(res);
+ size_t usable = TEST_MALLOC_SIZE(res);
+ assert(usable > 0);
+ free(res);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size,
+ test_zero_alloc);
+}
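
The error contract aligned_alloc.c relies on: on failure jemalloc's aligned_alloc() returns NULL and sets errno, EINVAL for an unsupported (non-power-of-two) alignment and ENOMEM for requests that cannot be satisfied. A small sketch of defensive use, assuming C11 and a POSIX errno:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        errno = 0;
        void *p = aligned_alloc(64, 4096); /* power-of-two alignment */
        if (p == NULL) {
            /* EINVAL: bad alignment; ENOMEM: impossible request. */
            fprintf(stderr, "aligned_alloc failed: %d\n", errno);
            return 1;
        }
        free(p);
        return 0;
    }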
diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c
new file mode 100644
index 0000000..0c64272
--- /dev/null
+++ b/deps/jemalloc/test/integration/allocated.c
@@ -0,0 +1,124 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg) {
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ expect_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
+ 0))) {
+ if (err == ENOENT) {
+ goto label_ENOENT;
+ }
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ expect_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
+ expect_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ expect_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = TEST_MALLOC_SIZE(p);
+ expect_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
+ expect_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ expect_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ expect_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return NULL;
+label_ENOENT:
+ expect_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
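
allocated.c reads per-thread lifetime byte counters; the "thread.allocatedp"/"thread.deallocatedp" variants hand back a pointer into thread-local storage, so hot paths can poll the counter without further mallctl calls. A sketch, assuming a build with statistics (otherwise the mallctl returns ENOENT, as the test's skip path shows):

    #include <jemalloc/jemalloc.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        uint64_t *allocated;
        size_t sz = sizeof(allocated);
        /* Fetch the pointer to this thread's allocation counter once... */
        if (mallctl("thread.allocatedp", (void *)&allocated, &sz, NULL, 0)) {
            return 1; /* ENOENT when stats support is compiled out */
        }
        void *p = malloc(123);
        /* ...then dereference it directly on subsequent reads. */
        printf("thread allocated %llu bytes so far\n",
            (unsigned long long)*allocated);
        free(p);
        return 0;
    }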
diff --git a/deps/jemalloc/test/integration/cpp/basic.cpp b/deps/jemalloc/test/integration/cpp/basic.cpp
new file mode 100644
index 0000000..c1cf6cd
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/basic.cpp
@@ -0,0 +1,24 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_basic) {
+ auto foo = new long(4);
+ expect_ptr_not_null(foo, "Unexpected new[] failure");
+ delete foo;
+ // Test nullptr handling.
+ foo = nullptr;
+ delete foo;
+
+ auto bar = new long;
+ expect_ptr_not_null(bar, "Unexpected new failure");
+ delete bar;
+ // Test nullptr handling.
+ bar = nullptr;
+ delete bar;
+}
+TEST_END
+
+int
+main() {
+ return test(
+ test_basic);
+}
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp b/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp
new file mode 100644
index 0000000..42196d6
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_false.cpp
@@ -0,0 +1,23 @@
+#include <memory>
+
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_failing_alloc) {
+ bool saw_exception = false;
+ try {
+ /* Too big of an allocation to succeed. */
+ void *volatile ptr = ::operator new((size_t)-1);
+ (void)ptr;
+ } catch (...) {
+ saw_exception = true;
+ }
+ expect_true(saw_exception, "Didn't get a failure");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_failing_alloc);
+}
+
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_false.sh b/deps/jemalloc/test/integration/cpp/infallible_new_false.sh
new file mode 100644
index 0000000..7d41812
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_false.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+XMALLOC_STR=""
+if [ "x${enable_xmalloc}" = "x1" ] ; then
+ XMALLOC_STR="xmalloc:false,"
+fi
+
+export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:false"
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp b/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp
new file mode 100644
index 0000000..d675412
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_true.cpp
@@ -0,0 +1,67 @@
+#include <stdio.h>
+
+#include "test/jemalloc_test.h"
+
+/*
+ * We can't test C++ in unit tests. In order to intercept abort, use a secret
+ * safety check abort hook in integration tests.
+ */
+typedef void (*abort_hook_t)(const char *message);
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ if (strcmp(message, "<jemalloc>: Allocation failed and "
+ "opt.experimental_infallible_new is true. Aborting.\n") != 0) {
+ abort();
+ }
+ fake_abort_called = true;
+}
+
+static bool
+own_operator_new(void) {
+ uint64_t before, after;
+ size_t sz = sizeof(before);
+
+ /* thread.allocated is always available, even w/o config_stats. */
+ expect_d_eq(mallctl("thread.allocated", (void *)&before, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure reading stats");
+ void *volatile ptr = ::operator new((size_t)8);
+ expect_ptr_not_null(ptr, "Unexpected allocation failure");
+ expect_d_eq(mallctl("thread.allocated", (void *)&after, &sz, NULL, 0),
+ 0, "Unexpected mallctl failure reading stats");
+
+ return (after != before);
+}
+
+TEST_BEGIN(test_failing_alloc) {
+ abort_hook_t abort_hook = &fake_abort;
+ expect_d_eq(mallctl("experimental.hooks.safety_check_abort", NULL, NULL,
+ (void *)&abort_hook, sizeof(abort_hook)), 0,
+ "Unexpected mallctl failure setting abort hook");
+
+ /*
+ * Not owning operator new is only expected to happen on MinGW which
+ * does not support operator new / delete replacement.
+ */
+#ifdef _WIN32
+ test_skip_if(!own_operator_new());
+#else
+ expect_true(own_operator_new(), "No operator new overload");
+#endif
+ void *volatile ptr = (void *)1;
+ try {
+ /* Too big of an allocation to succeed. */
+ ptr = ::operator new((size_t)-1);
+ } catch (...) {
+ abort();
+ }
+ expect_ptr_null(ptr, "Allocation should have failed");
+ expect_b_eq(fake_abort_called, true, "Abort hook not invoked");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_failing_alloc);
+}
+
diff --git a/deps/jemalloc/test/integration/cpp/infallible_new_true.sh b/deps/jemalloc/test/integration/cpp/infallible_new_true.sh
new file mode 100644
index 0000000..4a0ff54
--- /dev/null
+++ b/deps/jemalloc/test/integration/cpp/infallible_new_true.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+XMALLOC_STR=""
+if [ "x${enable_xmalloc}" = "x1" ] ; then
+ XMALLOC_STR="xmalloc:false,"
+fi
+
+export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:true"
diff --git a/deps/jemalloc/test/integration/extent.c b/deps/jemalloc/test/integration/extent.c
new file mode 100644
index 0000000..7a028f1
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.c
@@ -0,0 +1,287 @@
+#include "test/jemalloc_test.h"
+
+#include "test/extent_hooks.h"
+
+#include "jemalloc/internal/arena_types.h"
+
+static void
+test_extent_body(unsigned arena_ind) {
+ void *p;
+ size_t large0, large1, large2, sz;
+ size_t purge_mib[3];
+ size_t purge_miblen;
+ int flags;
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.0.size failure");
+ expect_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.1.size failure");
+ expect_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
+ 0), 0, "Unexpected arenas.lextent.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ purge_miblen = sizeof(purge_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
+ 0, "Unexpected mallctlnametomib() failure");
+ purge_mib[1] = (size_t)arena_ind;
+ called_alloc = false;
+ try_alloc = true;
+ try_dalloc = false;
+ try_decommit = false;
+ p = mallocx(large0 * 2, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ expect_true(called_alloc, "Expected alloc call");
+ called_dalloc = false;
+ called_decommit = false;
+ did_purge_lazy = false;
+ did_purge_forced = false;
+ called_split = false;
+ xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
+ expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_a) {
+ expect_true(called_dalloc, "Expected dalloc call");
+ expect_true(called_decommit, "Expected decommit call");
+ expect_true(did_purge_lazy || did_purge_forced,
+ "Expected purge");
+ expect_true(called_split, "Expected split call");
+ }
+ dallocx(p, flags);
+ try_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ try_dalloc = false;
+ try_decommit = true;
+ p = mallocx(large0 * 2, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ called_split = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
+ expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
+ 0, "Unexpected arena.%u.purge error", arena_ind);
+ if (xallocx_success_b) {
+ expect_true(did_split, "Expected split");
+ }
+ xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
+ if (did_split) {
+ expect_b_eq(did_decommit, did_commit,
+ "Expected decommit/commit match");
+ }
+ if (xallocx_success_b && xallocx_success_c) {
+ expect_true(did_merge, "Expected merge");
+ }
+ dallocx(p, flags);
+ try_dalloc = true;
+ try_decommit = false;
+
+ /* Make sure non-large allocation succeeds. */
+ p = mallocx(42, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, flags);
+}
+
+static void
+test_manual_hook_auto_arena(void) {
+ unsigned narenas;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ /* Get number of auto arenas. */
+ expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+ if (narenas == 1) {
+ return;
+ }
+
+ /* Install custom extent hooks on arena 1 (might not be initialized). */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = 1;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ static bool auto_arena_created = false;
+ if (old_hooks != &hooks) {
+ expect_b_eq(auto_arena_created, false,
+ "Expected auto arena 1 created only once.");
+ auto_arena_created = true;
+ }
+}
+
+static void
+test_manual_hook_body(void) {
+ unsigned arena_ind;
+ size_t old_size, new_size, sz;
+ size_t hooks_mib[3];
+ size_t hooks_miblen;
+ extent_hooks_t *new_hooks, *old_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Unexpected mallctl() failure");
+
+ /* Install custom extent hooks. */
+ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
+ &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
+ hooks_mib[1] = (size_t)arena_ind;
+ old_size = sizeof(extent_hooks_t *);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, (void *)&new_hooks, new_size), 0,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->alloc, extent_alloc_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->commit, extent_commit_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->decommit, extent_decommit_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->split, extent_split_hook,
+ "Unexpected extent_hooks error");
+ expect_ptr_ne(old_hooks->merge, extent_merge_hook,
+ "Unexpected extent_hooks error");
+
+ if (!is_background_thread_enabled()) {
+ test_extent_body(arena_ind);
+ }
+
+ /* Restore extent hooks. */
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
+ (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
+ expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
+ &old_size, NULL, 0), 0, "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->alloc, default_hooks->alloc,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->commit, default_hooks->commit,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->decommit, default_hooks->decommit,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->split, default_hooks->split,
+ "Unexpected extent_hooks error");
+ expect_ptr_eq(old_hooks->merge, default_hooks->merge,
+ "Unexpected extent_hooks error");
+}
+
+TEST_BEGIN(test_extent_manual_hook) {
+ test_manual_hook_auto_arena();
+ test_manual_hook_body();
+
+ /* Test failure paths. */
+ try_split = false;
+ test_manual_hook_body();
+ try_merge = false;
+ test_manual_hook_body();
+ try_purge_lazy = false;
+ try_purge_forced = false;
+ test_manual_hook_body();
+
+ try_split = try_merge = try_purge_lazy = try_purge_forced = true;
+}
+TEST_END
+
+TEST_BEGIN(test_extent_auto_hook) {
+ unsigned arena_ind;
+ size_t new_size, sz;
+ extent_hooks_t *new_hooks;
+
+ extent_hooks_prep();
+
+ sz = sizeof(unsigned);
+ new_hooks = &hooks;
+ new_size = sizeof(extent_hooks_t *);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
+
+ test_skip_if(is_background_thread_enabled());
+ test_extent_body(arena_ind);
+}
+TEST_END
+
+static void
+test_arenas_create_ext_base(arena_config_t config,
+ bool expect_hook_data, bool expect_hook_metadata)
+{
+ unsigned arena, arena1;
+ void *ptr;
+ size_t sz = sizeof(unsigned);
+
+ extent_hooks_prep();
+
+ called_alloc = false;
+ expect_d_eq(mallctl("experimental.arenas_create_ext",
+ (void *)&arena, &sz, &config, sizeof(arena_config_t)), 0,
+ "Unexpected mallctl() failure");
+ expect_b_eq(called_alloc, expect_hook_metadata,
+ "expected hook metadata alloc mismatch");
+
+ called_alloc = false;
+ ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ expect_b_eq(called_alloc, expect_hook_data,
+ "expected hook data alloc mismatch");
+
+ expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ 0, "Unexpected mallctl() failure");
+ expect_u_eq(arena, arena1, "Unexpected arena index");
+ dallocx(ptr, 0);
+}
+
+TEST_BEGIN(test_arenas_create_ext_with_ehooks_no_metadata) {
+ arena_config_t config;
+ config.extent_hooks = &hooks;
+ config.metadata_use_hooks = false;
+
+ test_arenas_create_ext_base(config, true, false);
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_create_ext_with_ehooks_with_metadata) {
+ arena_config_t config;
+ config.extent_hooks = &hooks;
+ config.metadata_use_hooks = true;
+
+ test_arenas_create_ext_base(config, true, true);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_extent_manual_hook,
+ test_extent_auto_hook,
+ test_arenas_create_ext_with_ehooks_no_metadata,
+ test_arenas_create_ext_with_ehooks_with_metadata);
+}
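
extent.c drives the "arena.<i>.extent_hooks" mallctl, which reads and writes a pointer to an extent_hooks_t function table (the custom table itself lives in test/extent_hooks.h). A minimal sketch that only reads the default hooks installed on a new arena; installing a replacement table works the same way through newp/newlen:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        unsigned arena;
        size_t sz = sizeof(arena);
        if (mallctl("arenas.create", &arena, &sz, NULL, 0) != 0) {
            return 1;
        }
        char name[64];
        snprintf(name, sizeof(name), "arena.%u.extent_hooks", arena);
        extent_hooks_t *hooks;
        size_t hsz = sizeof(hooks);
        /* Read the hook table currently installed on this arena. */
        if (mallctl(name, (void *)&hooks, &hsz, NULL, 0) != 0) {
            return 1;
        }
        printf("default alloc hook: %p\n", (void *)hooks->alloc);
        return 0;
    }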
diff --git a/deps/jemalloc/test/integration/extent.sh b/deps/jemalloc/test/integration/extent.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/extent.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/malloc.c b/deps/jemalloc/test/integration/malloc.c
new file mode 100644
index 0000000..ef44916
--- /dev/null
+++ b/deps/jemalloc/test/integration/malloc.c
@@ -0,0 +1,16 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_zero_alloc) {
+ void *res = malloc(0);
+ assert(res);
+ size_t usable = TEST_MALLOC_SIZE(res);
+ assert(usable > 0);
+ free(res);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_zero_alloc);
+}
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
new file mode 100644
index 0000000..fdf1e3f
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -0,0 +1,274 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ expect_ptr_null(mallocx(largemax+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(mallocx(SIZE_T_MAX, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+static void *
+remote_alloc(void *arg) {
+ unsigned arena;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ size_t large_sz;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
+ | MALLOCX_TCACHE_NONE);
+ void **ret = (void **)arg;
+ *ret = ptr;
+
+ return NULL;
+}
+
+TEST_BEGIN(test_remote_free) {
+ thd_t thd;
+ void *ret;
+ thd_create(&thd, remote_alloc, (void *)&ret);
+ thd_join(thd, NULL);
+ expect_ptr_not_null(ret, "Unexpected mallocx failure");
+
+ /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+ dallocx(ret, 0);
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_oom) {
+ size_t largemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ largemax = get_large_size(get_nlarge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0));
+ if (ptrs[i] == NULL) {
+ oom = true;
+ }
+ }
+ expect_true(oom,
+ "Expected OOM during series of calls to mallocx(size=%zu, 0)",
+ largemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL) {
+ dallocx(ptrs[i], 0);
+ }
+ }
+ purge();
+
+#if LG_SIZEOF_PTR == 3
+ expect_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)),
+ "Expected OOM for mallocx()");
+ expect_ptr_null(mallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)),
+ "Expected OOM for mallocx()");
+#else
+ expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
+ "Expected OOM for mallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ size_t nsz, rsz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ expect_zu_ge(rsz, sz, "Real size smaller than expected");
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ expect_ptr_not_null(p,
+ "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ const char *percpu_arena;
+ size_t sz = sizeof(percpu_arena);
+
+ if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+ strcmp(percpu_arena, "disabled") != 0) {
+ test_skip("test_alignment_and_size skipped: "
+ "not working with percpu arena.");
+    }
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO | MALLOCX_ARENA(0));
+ expect_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO | MALLOCX_ARENA(0));
+ expect_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ expect_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_overflow,
+ test_oom,
+ test_remote_free,
+ test_basic,
+ test_alignment_and_size);
+}
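
The invariant test_basic checks is the heart of the sized API: nallocx(size, flags) predicts, without allocating, the usable size mallocx(size, flags) will deliver, and sallocx(p, 0) confirms it after the fact. A sketch:

    #include <jemalloc/jemalloc.h>
    #include <assert.h>

    int main(void) {
        size_t want = 100;
        size_t predicted = nallocx(want, 0); /* e.g. 112: next size class up */
        void *p = mallocx(want, 0);
        assert(p != NULL);
        assert(sallocx(p, 0) == predicted); /* prediction matches reality */
        dallocx(p, 0);
        return 0;
    }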
diff --git a/deps/jemalloc/test/integration/mallocx.sh b/deps/jemalloc/test/integration/mallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c
new file mode 100644
index 0000000..ce63327
--- /dev/null
+++ b/deps/jemalloc/test/integration/overflow.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ unsigned nlextents;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+ void *p;
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ 0), 0, "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nlextents - 1;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+ NULL, 0), 0, "Unexpected mallctlbymib() error");
+
+ expect_ptr_null(malloc(max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(malloc(SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ expect_ptr_null(calloc(1, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(calloc(1, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Unexpected malloc() OOM");
+ expect_ptr_null(realloc(p, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ expect_ptr_null(realloc(p, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+ free(p);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+int
+main(void) {
+ return test(
+ test_overflow);
+}
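
These requests fail because jemalloc caps any single allocation at the largest size class, which is itself bounded by PTRDIFF_MAX so that pointer arithmetic across an object cannot overflow. A sketch of the observable behavior:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        errno = 0;
        void *p = malloc((size_t)PTRDIFF_MAX + 1);
        /* Expected: p == NULL and errno == ENOMEM; no wraparound, no crash. */
        printf("p=%p errno=%d\n", p, errno);
        free(p); /* free(NULL) is a no-op */
        return 0;
    }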
diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c
new file mode 100644
index 0000000..2da0549
--- /dev/null
+++ b/deps/jemalloc/test/integration/posix_memalign.c
@@ -0,0 +1,128 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 23)
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+TEST_BEGIN(test_alignment_errors) {
+ size_t alignment;
+ void *p;
+
+ for (alignment = 0; alignment < sizeof(void *); alignment++) {
+ expect_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment);
+ }
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ expect_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors) {
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ expect_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+#define NITER 4
+ size_t alignment, size, total;
+ unsigned i;
+ int err;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 0;
+ size < 3 * alignment && size < (1U << 31);
+ size += ((size == 0) ? 1 :
+ (alignment >> (LG_SIZEOF_PTR-1)) - 1)) {
+ for (i = 0; i < NITER; i++) {
+ err = posix_memalign(&ps[i],
+ alignment, size);
+ if (err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += TEST_MALLOC_SIZE(ps[i]);
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size);
+}
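
posix_memalign's contract, as exercised above: the alignment must be a power of two and a multiple of sizeof(void *), the pointer comes back through the out-parameter, and errors are the return value (EINVAL/ENOMEM) rather than errno. A sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        void *p = NULL;
        /* 64 is a power of two and a multiple of sizeof(void *). */
        int err = posix_memalign(&p, 64, 1000);
        if (err != 0) {
            fprintf(stderr, "posix_memalign: error %d\n", err);
            return 1;
        }
        free(p);
        return 0;
    }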
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
new file mode 100644
index 0000000..68b8f38
--- /dev/null
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -0,0 +1,308 @@
+#include "test/jemalloc_test.h"
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+TEST_BEGIN(test_grow_and_shrink) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 1024
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+            expect_zu_ge(szs[j], szs[j-1]+1,
+ "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ expect_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+static bool
+validate_fill(void *p, uint8_t c, size_t offset, size_t len) {
+ bool ret = false;
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ uint8_t *volatile buf = (uint8_t *)p;
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ uint8_t b = buf[offset+i];
+ if (b != c) {
+ test_fail("Allocation at %p (len=%zu) contains %#x "
+ "rather than %#x at offset %zu", p, len, b, c,
+ offset+i);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_zero) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ size_t psz, qsz, i, j;
+ size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+ for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+ size_t start_size = start_sizes[i];
+ p = mallocx(start_size, MALLOCX_ZERO);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ psz = sallocx(p, 0);
+
+ expect_false(validate_fill(p, 0, 0, psz),
+ "Expected zeroed memory");
+ memset(p, FILL_BYTE, psz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+
+ for (j = 1; j < RANGE; j++) {
+ q = rallocx(p, start_size+j, MALLOCX_ZERO);
+ expect_ptr_not_null(q, "Unexpected rallocx() error");
+ qsz = sallocx(q, 0);
+ if (q != p || qsz != psz) {
+ expect_false(validate_fill(q, FILL_BYTE, 0,
+ psz), "Expected filled memory");
+ expect_false(validate_fill(q, 0, psz, qsz-psz),
+ "Expected zeroed memory");
+ }
+ if (psz != qsz) {
+ memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+ qsz-psz);
+ psz = qsz;
+ }
+ p = q;
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+ dallocx(p, 0);
+ }
+#undef FILL_BYTE
+}
+TEST_END
+
+TEST_BEGIN(test_align) {
+ void *p, *q;
+ size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+ align = ZU(1);
+ p = mallocx(1, MALLOCX_ALIGN(align));
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+ q = rallocx(p, 1, MALLOCX_ALIGN(align));
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for align=%zu", align);
+ expect_ptr_null(
+ (void *)((uintptr_t)q & (align-1)),
+ "%p inadequately aligned for align=%zu",
+ q, align);
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_align_enum) {
+/* Span both small sizes and large sizes. */
+#define LG_MIN 12
+#define LG_MAX 15
+ for (size_t lg_align = LG_MIN; lg_align <= LG_MAX; ++lg_align) {
+ for (size_t lg_size = LG_MIN; lg_size <= LG_MAX; ++lg_size) {
+ size_t size = 1 << lg_size;
+ for (size_t lg_align_next = LG_MIN;
+ lg_align_next <= LG_MAX; ++lg_align_next) {
+ int flags = MALLOCX_LG_ALIGN(lg_align);
+ void *p = mallocx(1, flags);
+ assert_ptr_not_null(p,
+ "Unexpected mallocx() error");
+ assert_zu_eq(nallocx(1, flags),
+ TEST_MALLOC_SIZE(p),
+ "Wrong mallocx() usable size");
+ int flags_next =
+ MALLOCX_LG_ALIGN(lg_align_next);
+ p = rallocx(p, size, flags_next);
+ assert_ptr_not_null(p,
+ "Unexpected rallocx() error");
+ expect_zu_eq(nallocx(size, flags_next),
+ TEST_MALLOC_SIZE(p),
+ "Wrong rallocx() usable size");
+ free(p);
+ }
+ }
+ }
+#undef LG_MAX
+#undef LG_MIN
+}
+TEST_END
+
+TEST_BEGIN(test_lg_align_and_zero) {
+ /*
+ * Use volatile to workaround buffer overflow false positives
+ * (-D_FORTIFY_SOURCE=3).
+ */
+ void *volatile p, *volatile q;
+ unsigned lg_align;
+ size_t sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+ lg_align = 0;
+ p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+ q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ expect_ptr_not_null(q,
+ "Unexpected rallocx() error for lg_align=%u", lg_align);
+ expect_ptr_null(
+ (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+ "%p inadequately aligned for lg_align=%u", q, lg_align);
+ sz = sallocx(q, 0);
+ if ((sz << 1) <= MAX_VALIDATE) {
+ expect_false(validate_fill(q, 0, 0, sz),
+ "Expected zeroed memory");
+ } else {
+ expect_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+ "Expected zeroed memory");
+ expect_false(validate_fill(
+ (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+ 0, 0, MAX_VALIDATE), "Expected zeroed memory");
+ }
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+ void *p;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ expect_ptr_null(rallocx(p, largemax+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(rallocx(p, SIZE_T_MAX, 0),
+ "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
+ "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+int
+main(void) {
+ return test(
+ test_grow_and_shrink,
+ test_zero,
+ test_align,
+ test_align_enum,
+ test_lg_align_and_zero,
+ test_overflow);
+}
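
rallocx is realloc with flags: it may resize in place or move the object, MALLOCX_ZERO guarantees bytes beyond the old usable size come back zeroed, and MALLOCX_ALIGN re-establishes alignment on every resize. A sketch of the grow path:

    #include <jemalloc/jemalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        assert(p != NULL);
        /* Grow; the result is again 64-byte aligned, the new tail zeroed. */
        void *q = rallocx(p, 5000, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        assert(q != NULL && ((uintptr_t)q & 63) == 0);
        dallocx(q, 0);
        return 0;
    }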
diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c
new file mode 100644
index 0000000..ca01448
--- /dev/null
+++ b/deps/jemalloc/test/integration/sdallocx.c
@@ -0,0 +1,55 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 22)
+#define NITER 3
+
+TEST_BEGIN(test_basic) {
+ void *ptr = mallocx(64, 0);
+ sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ size_t nsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ total += nsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ sdallocx(ps[i], sz,
+ MALLOCX_ALIGN(alignment));
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_basic,
+ test_alignment_and_size);
+}
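
sdallocx is sized deallocation: the caller passes back the allocation size (the original request, or anything that maps to the same size class) and the same alignment flags, letting jemalloc skip the size-class lookup on the free path. A sketch:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = mallocx(64, MALLOCX_ALIGN(32));
        if (p != NULL) {
            /* Hand the request size and alignment flag back on free. */
            sdallocx(p, 64, MALLOCX_ALIGN(32));
        }
        return 0;
    }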
diff --git a/deps/jemalloc/test/integration/slab_sizes.c b/deps/jemalloc/test/integration/slab_sizes.c
new file mode 100644
index 0000000..f6a66f2
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */
+
+TEST_BEGIN(test_slab_sizes) {
+ unsigned nbins;
+ size_t page;
+ size_t sizemib[4];
+ size_t slabmib[4];
+ size_t len;
+
+ len = sizeof(nbins);
+ expect_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+ "nbins mallctl failure");
+
+ len = sizeof(page);
+ expect_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
+ "page mallctl failure");
+
+ len = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
+ "bin size mallctlnametomib failure");
+
+ len = 4;
+ expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
+ 0, "slab size mallctlnametomib failure");
+
+ size_t biggest_slab_seen = 0;
+
+ for (unsigned i = 0; i < nbins; i++) {
+ size_t bin_size;
+ size_t slab_size;
+ len = sizeof(size_t);
+ sizemib[2] = i;
+ slabmib[2] = i;
+ expect_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
+ NULL, 0), 0, "bin size mallctlbymib failure");
+
+ len = sizeof(size_t);
+ expect_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
+ NULL, 0), 0, "slab size mallctlbymib failure");
+
+ if (bin_size < 100) {
+ /*
+ * Then we should be as close to 17 as possible. Since
+ * not all page sizes are valid (because of bitmap
+ * limitations on the number of items in a slab), we
+ * should at least make sure that the number of pages
+ * goes up.
+ */
+ expect_zu_ge(slab_size, biggest_slab_seen,
+ "Slab sizes should go up");
+ biggest_slab_seen = slab_size;
+ } else if (
+ (100 <= bin_size && bin_size < 128)
+ || (128 < bin_size && bin_size <= 200)) {
+ expect_zu_eq(slab_size, page,
+ "Forced-small slabs should be small");
+ } else if (bin_size == 128) {
+ expect_zu_eq(slab_size, 2 * page,
+ "Forced-2-page slab should be 2 pages");
+ } else if (200 < bin_size && bin_size <= 4096) {
+ expect_zu_ge(slab_size, biggest_slab_seen,
+ "Slab sizes should go up");
+ biggest_slab_seen = slab_size;
+ }
+ }
+ /*
+ * For any reasonable configuration, 17 pages should be a valid slab
+ * size for 4096-byte items.
+ */
+ expect_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_slab_sizes);
+}
diff --git a/deps/jemalloc/test/integration/slab_sizes.sh b/deps/jemalloc/test/integration/slab_sizes.sh
new file mode 100644
index 0000000..07e3db8
--- /dev/null
+++ b/deps/jemalloc/test/integration/slab_sizes.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# Some screwy-looking slab sizes.
+export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
diff --git a/deps/jemalloc/test/integration/smallocx.c b/deps/jemalloc/test/integration/smallocx.c
new file mode 100644
index 0000000..389319b
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.c
@@ -0,0 +1,312 @@
+#include "test/jemalloc_test.h"
+#include "jemalloc/jemalloc_macros.h"
+
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+#ifndef JEMALLOC_VERSION_GID_IDENT
+ #error "JEMALLOC_VERSION_GID_IDENT not defined"
+#endif
+
+#define JOIN(x, y) x ## y
+#define JOIN2(x, y) JOIN(x, y)
+#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
+
+typedef struct {
+ void *ptr;
+ size_t size;
+} smallocx_return_t;
+
+extern smallocx_return_t
+smallocx(size_t size, int flags);
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+/*
+ * On systems which can't merge extents, tests that call this function generate
+ * a lot of dirty memory very quickly. Purging between cycles mitigates
+ * potential OOM on e.g. 32-bit Windows.
+ */
+static void
+purge(void) {
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl error");
+}
+
+/*
+ * GCC "-Walloc-size-larger-than" warning detects when one of the memory
+ * allocation functions is called with a size larger than the maximum size that
+ * they support. Here we want to explicitly test that the allocation functions
+ * do indeed fail properly when this is the case, which triggers the warning.
+ * Therefore we disable the warning for these tests.
+ */
+JEMALLOC_DIAGNOSTIC_PUSH
+JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+
+TEST_BEGIN(test_overflow) {
+ size_t largemax;
+
+ largemax = get_large_size(get_nlarge()-1);
+
+ expect_ptr_null(smallocx(largemax+1, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
+
+ expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
+
+ expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
+ "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
+
+ expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
+ "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
+ ZU(PTRDIFF_MAX)+1);
+}
+TEST_END
+
+static void *
+remote_alloc(void *arg) {
+ unsigned arena;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ size_t large_sz;
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ smallocx_return_t r
+ = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
+ void *ptr = r.ptr;
+ expect_zu_eq(r.size,
+ nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
+ "Expected smalloc(size,flags).size == nallocx(size,flags)");
+ void **ret = (void **)arg;
+ *ret = ptr;
+
+ return NULL;
+}
+
+TEST_BEGIN(test_remote_free) {
+ thd_t thd;
+ void *ret;
+ thd_create(&thd, remote_alloc, (void *)&ret);
+ thd_join(thd, NULL);
+ expect_ptr_not_null(ret, "Unexpected smallocx failure");
+
+ /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
+ dallocx(ret, 0);
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_oom) {
+ size_t largemax;
+ bool oom;
+ void *ptrs[3];
+ unsigned i;
+
+ /*
+ * It should be impossible to allocate three objects that each consume
+ * nearly half the virtual address space.
+ */
+ largemax = get_large_size(get_nlarge()-1);
+ oom = false;
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ ptrs[i] = smallocx(largemax, 0).ptr;
+ if (ptrs[i] == NULL) {
+ oom = true;
+ }
+ }
+ expect_true(oom,
+ "Expected OOM during series of calls to smallocx(size=%zu, 0)",
+ largemax);
+ for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
+ if (ptrs[i] != NULL) {
+ dallocx(ptrs[i], 0);
+ }
+ }
+ purge();
+
+#if LG_SIZEOF_PTR == 3
+ expect_ptr_null(smallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
+ "Expected OOM for smallocx()");
+ expect_ptr_null(smallocx(0x8000000000000000ULL,
+ MALLOCX_ALIGN(0x80000000)).ptr,
+ "Expected OOM for smallocx()");
+#else
+ expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
+ "Expected OOM for smallocx()");
+#endif
+}
+TEST_END
+
+/* Re-enable the "-Walloc-size-larger-than=" warning */
+JEMALLOC_DIAGNOSTIC_POP
+
+TEST_BEGIN(test_basic) {
+#define MAXSZ (((size_t)1) << 23)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ smallocx_return_t ret;
+ size_t nsz, rsz, smz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ ret = smallocx(sz, 0);
+ p = ret.ptr;
+ smz = ret.size;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=0) error", sz);
+ rsz = sallocx(p, 0);
+ expect_zu_ge(rsz, sz, "Real size smaller than expected");
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+ dallocx(p, 0);
+
+ ret = smallocx(sz, 0);
+ p = ret.ptr;
+ smz = ret.size;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=0) error", sz);
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
+    ret = smallocx(sz, MALLOCX_ZERO);
+    p = ret.ptr;
+    smz = ret.size;
+ expect_ptr_not_null(p,
+ "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
+ nsz);
+ rsz = sallocx(p, 0);
+ expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
+ dallocx(p, 0);
+ purge();
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size) {
+ const char *percpu_arena;
+ size_t sz = sizeof(percpu_arena);
+
+	if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
+	    strcmp(percpu_arena, "disabled") != 0) {
+		test_skip("test_alignment_and_size skipped: "
+		    "does not work with percpu arena.");
+	}
+#define MAXALIGN (((size_t)1) << 23)
+#define NITER 4
+ size_t nsz, rsz, smz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++) {
+ ps[i] = NULL;
+ }
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
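+		/*
+		 * The size stride scales with the alignment so that only a
+		 * handful of sizes are probed per alignment while still
+		 * crossing size class boundaries.
+		 */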
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ expect_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ smallocx_return_t ret
+ = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
+ ps[i] = ret.ptr;
+ expect_ptr_not_null(ps[i],
+ "smallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ smz = ret.size;
+ expect_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_zu_eq(nsz, smz,
+ "nallocx()/smallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ expect_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1)) {
+ break;
+ }
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ purge();
+ }
+#undef MAXALIGN
+#undef NITER
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_overflow,
+ test_oom,
+ test_remote_free,
+ test_basic,
+ test_alignment_and_size);
+}
diff --git a/deps/jemalloc/test/integration/smallocx.sh b/deps/jemalloc/test/integration/smallocx.sh
new file mode 100644
index 0000000..d07f10f
--- /dev/null
+++ b/deps/jemalloc/test/integration/smallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi
diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c
new file mode 100644
index 0000000..4a6abf6
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_arena.c
@@ -0,0 +1,86 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+static void mallctl_failure(int err);
+
+void *
+thd_start(void *arg) {
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
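+	/*
+	 * Bind this thread to the arena chosen by the main thread, then read
+	 * the binding back and verify it.
+	 */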
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
+ (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
+ 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ expect_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return NULL;
+}
+
+static void
+mallctl_failure(int err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+}
+
+TEST_BEGIN(test_thread_arena) {
+ void *p;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ expect_ptr_not_null(p, "Error in malloc()");
+
+ unsigned arena_ind, old_arena_ind;
+ size_t sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ 0, "Arena creation failure");
+
+ size_t size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size,
+ (void *)&arena_ind, sizeof(arena_ind))) != 0) {
+ mallctl_failure(err);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ expect_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+ free(p);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_thread_arena);
+}
diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c
new file mode 100644
index 0000000..d44dbe9
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,87 @@
+#include "test/jemalloc_test.h"
+
+void *
+thd_start(void *arg) {
+ bool e0, e1;
+ size_t sz = sizeof(bool);
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
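+	/*
+	 * Each write below also reads the previous state back into e0, so the
+	 * expectations refer to the state before the toggle.
+	 */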
+ if (e0) {
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
+ (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ expect_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return NULL;
+}
+
+TEST_BEGIN(test_main_thread) {
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread) {
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+ /* Run tests multiple times to check for bad interactions. */
+ return test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
new file mode 100644
index 0000000..1370854
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -0,0 +1,384 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Use a separate arena for xallocx() extension/contraction tests so that
+ * internal allocation (e.g. by heap profiling) can't interpose allocations
+ * where xallocx() would ordinarily be able to extend.
+ */
+static unsigned
+arena_ind(void) {
+ static unsigned ind = 0;
+
+ if (ind == 0) {
+ size_t sz = sizeof(ind);
+ expect_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure creating arena");
+ }
+
+ return ind;
+}
+
+TEST_BEGIN(test_same_size) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail) {
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
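+	/*
+	 * sz+5 requires the next size class, which cannot be reached in place
+	 * here, so xallocx() is expected to return the unchanged size.
+	 */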
+ tsz = xallocx(p, sz + 5, 0, 0);
+ expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static unsigned
+get_nsizes_impl(const char *cmd) {
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return ret;
+}
+
+static unsigned
+get_nsmall(void) {
+ return get_nsizes_impl("arenas.nbins");
+}
+
+static unsigned
+get_nlarge(void) {
+ return get_nsizes_impl("arenas.nlextents");
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind) {
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return ret;
+}
+
+static size_t
+get_small_size(size_t ind) {
+ return get_size_impl("arenas.bin.0.size", ind);
+}
+
+static size_t
+get_large_size(size_t ind) {
+ return get_size_impl("arenas.lextent.0.size", ind);
+}
+
+TEST_BEGIN(test_size) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test smallest supported size. */
+ expect_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ expect_zu_le(xallocx(p, largemax, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ expect_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow) {
+ size_t small0, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ expect_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that largemax-size underflows. */
+ expect_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small) {
+ size_t small0, small1, largemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(small0, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ expect_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ expect_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ expect_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large) {
+ int flags = MALLOCX_ARENA(arena_ind());
+ size_t smallmax, large1, large2, large3, largemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ large3 = get_large_size(3);
+ largemax = get_large_size(get_nlarge()-1);
+
+ p = mallocx(large3, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+
+ expect_zu_eq(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, smallmax, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size decrease with non-zero extra. */
+ expect_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
+ "Unexpected xallocx() behavior");
+ expect_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ expect_zu_le(xallocx(p, large3, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+ expect_zu_le(xallocx(p, largemax+1, 0, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+	/* Test size increase with maximal extra (size + extra == SIZE_T_MAX). */
+ expect_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ expect_zu_ge(xallocx(p, large1, 0, flags), large1,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ expect_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
+ "Unexpected xallocx() behavior");
+
+ if (xallocx(p, large3, 0, flags) != large3) {
+ p = rallocx(p, large3, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ /* Test size+extra overflow. */
+ expect_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, flags);
+}
+TEST_END
+
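+/* Dump contiguous runs of identical bytes, to help diagnose fill mismatches. */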
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len) {
+ const uint8_t *pc = (const uint8_t *)p;
+ size_t i, range0;
+ uint8_t c0;
+
+ malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len);
+ range0 = 0;
+ c0 = pc[0];
+ for (i = 0; i < len; i++) {
+ if (pc[i] != c0) {
+ malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+ range0 = i;
+ c0 = pc[i];
+ }
+ }
+ malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
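+/* Return true iff any byte in [offset, offset+len) differs from c. */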
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
+ const uint8_t *pc = (const uint8_t *)p;
+ bool err;
+ size_t i;
+
+ for (i = offset, err = false; i < offset+len; i++) {
+ if (pc[i] != c) {
+ err = true;
+ }
+ }
+
+ if (err) {
+ print_filled_extents(p, c, offset + len);
+ }
+
+ return err;
+}
+
+static void
+test_zero(size_t szmin, size_t szmax) {
+ int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, flags);
+ expect_ptr_not_null(p, "Unexpected mallocx() error");
+ expect_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ if (xallocx(p, sz, 0, flags) != sz) {
+ p = rallocx(p, sz, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
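+	/*
+	 * Grow one size class at a time; the newly extended tail must come
+	 * back zeroed, and is then refilled with FILL_BYTE for the next
+	 * iteration.
+	 */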
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, flags);
+ if (xallocx(p, sz+1, 0, flags) != nsz) {
+ p = rallocx(p, sz+1, flags);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ }
+ expect_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ expect_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ expect_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, flags);
+}
+
+TEST_BEGIN(test_zero_large) {
+ size_t large0, large1;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+
+ test_zero(large1, large0 * 2);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_zero_large);
+}
diff --git a/deps/jemalloc/test/integration/xallocx.sh b/deps/jemalloc/test/integration/xallocx.sh
new file mode 100644
index 0000000..0cc2187
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_fill}" = "x1" ] ; then
+ export MALLOC_CONF="junk:false"
+fi