author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-14 13:40:54 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-14 13:40:54 +0000
commit     317c0644ccf108aa23ef3fd8358bd66c2840bfc0 (patch)
tree       c417b3d25c86b775989cb5ac042f37611b626c8a /deps/jemalloc/test/stress
parent     Initial commit. (diff)
Adding upstream version 5:7.2.4. (upstream/5%7.2.4)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'deps/jemalloc/test/stress')
-rw-r--r--  deps/jemalloc/test/stress/batch_alloc.c       198
-rw-r--r--  deps/jemalloc/test/stress/fill_flush.c         76
-rw-r--r--  deps/jemalloc/test/stress/hookbench.c          73
-rw-r--r--  deps/jemalloc/test/stress/large_microbench.c   33
-rw-r--r--  deps/jemalloc/test/stress/mallctl.c            74
-rw-r--r--  deps/jemalloc/test/stress/microbench.c        126
6 files changed, 580 insertions, 0 deletions
diff --git a/deps/jemalloc/test/stress/batch_alloc.c b/deps/jemalloc/test/stress/batch_alloc.c
new file mode 100644
index 0000000..427e1cb
--- /dev/null
+++ b/deps/jemalloc/test/stress/batch_alloc.c
@@ -0,0 +1,198 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+#define MIBLEN 8
+static size_t mib[MIBLEN];
+static size_t miblen = MIBLEN;
+
+#define TINY_BATCH 10
+#define TINY_BATCH_ITER (10 * 1000 * 1000)
+#define HUGE_BATCH (1000 * 1000)
+#define HUGE_BATCH_ITER 100
+#define LEN (100 * 1000 * 1000)
+static void *batch_ptrs[LEN];
+static size_t batch_ptrs_next = 0;
+static void *item_ptrs[LEN];
+static size_t item_ptrs_next = 0;
+
+#define SIZE 7
+
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static void
+batch_alloc_wrapper(size_t batch) {
+ batch_alloc_packet_t batch_alloc_packet =
+ {batch_ptrs + batch_ptrs_next, batch, SIZE, 0};
+ size_t filled;
+ size_t len = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &filled, &len,
+ &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
+ assert_zu_eq(filled, batch, "");
+}
+
+static void
+item_alloc_wrapper(size_t batch) {
+ for (size_t i = item_ptrs_next, end = i + batch; i < end; ++i) {
+ item_ptrs[i] = malloc(SIZE);
+ }
+}
+
+static void
+release_and_clear(void **ptrs, size_t len) {
+ for (size_t i = 0; i < len; ++i) {
+ void *p = ptrs[i];
+ assert_ptr_not_null(p, "allocation failed");
+ sdallocx(p, SIZE, 0);
+ ptrs[i] = NULL;
+ }
+}
+
+static void
+batch_alloc_without_free(size_t batch) {
+ batch_alloc_wrapper(batch);
+ batch_ptrs_next += batch;
+}
+
+static void
+item_alloc_without_free(size_t batch) {
+ item_alloc_wrapper(batch);
+ item_ptrs_next += batch;
+}
+
+static void
+batch_alloc_with_free(size_t batch) {
+ batch_alloc_wrapper(batch);
+ release_and_clear(batch_ptrs + batch_ptrs_next, batch);
+ batch_ptrs_next += batch;
+}
+
+static void
+item_alloc_with_free(size_t batch) {
+ item_alloc_wrapper(batch);
+ release_and_clear(item_ptrs + item_ptrs_next, batch);
+ item_ptrs_next += batch;
+}
+
+static void
+compare_without_free(size_t batch, size_t iter,
+ void (*batch_alloc_without_free_func)(void),
+ void (*item_alloc_without_free_func)(void)) {
+ assert(batch_ptrs_next == 0);
+ assert(item_ptrs_next == 0);
+ assert(batch * iter <= LEN);
+ for (size_t i = 0; i < iter; ++i) {
+ batch_alloc_without_free_func();
+ item_alloc_without_free_func();
+ }
+ release_and_clear(batch_ptrs, batch_ptrs_next);
+ batch_ptrs_next = 0;
+ release_and_clear(item_ptrs, item_ptrs_next);
+ item_ptrs_next = 0;
+ compare_funcs(0, iter,
+ "batch allocation", batch_alloc_without_free_func,
+ "item allocation", item_alloc_without_free_func);
+ release_and_clear(batch_ptrs, batch_ptrs_next);
+ batch_ptrs_next = 0;
+ release_and_clear(item_ptrs, item_ptrs_next);
+ item_ptrs_next = 0;
+}
+
+static void
+compare_with_free(size_t batch, size_t iter,
+ void (*batch_alloc_with_free_func)(void),
+ void (*item_alloc_with_free_func)(void)) {
+ assert(batch_ptrs_next == 0);
+ assert(item_ptrs_next == 0);
+ assert(batch * iter <= LEN);
+ for (size_t i = 0; i < iter; ++i) {
+ batch_alloc_with_free_func();
+ item_alloc_with_free_func();
+ }
+ batch_ptrs_next = 0;
+ item_ptrs_next = 0;
+ compare_funcs(0, iter,
+ "batch allocation", batch_alloc_with_free_func,
+ "item allocation", item_alloc_with_free_func);
+ batch_ptrs_next = 0;
+ item_ptrs_next = 0;
+}
+
+static void
+batch_alloc_without_free_tiny() {
+ batch_alloc_without_free(TINY_BATCH);
+}
+
+static void
+item_alloc_without_free_tiny() {
+ item_alloc_without_free(TINY_BATCH);
+}
+
+TEST_BEGIN(test_tiny_batch_without_free) {
+ compare_without_free(TINY_BATCH, TINY_BATCH_ITER,
+ batch_alloc_without_free_tiny, item_alloc_without_free_tiny);
+}
+TEST_END
+
+static void
+batch_alloc_with_free_tiny() {
+ batch_alloc_with_free(TINY_BATCH);
+}
+
+static void
+item_alloc_with_free_tiny() {
+ item_alloc_with_free(TINY_BATCH);
+}
+
+TEST_BEGIN(test_tiny_batch_with_free) {
+ compare_with_free(TINY_BATCH, TINY_BATCH_ITER,
+ batch_alloc_with_free_tiny, item_alloc_with_free_tiny);
+}
+TEST_END
+
+static void
+batch_alloc_without_free_huge() {
+ batch_alloc_without_free(HUGE_BATCH);
+}
+
+static void
+item_alloc_without_free_huge() {
+ item_alloc_without_free(HUGE_BATCH);
+}
+
+TEST_BEGIN(test_huge_batch_without_free) {
+ compare_without_free(HUGE_BATCH, HUGE_BATCH_ITER,
+ batch_alloc_without_free_huge, item_alloc_without_free_huge);
+}
+TEST_END
+
+static void
+batch_alloc_with_free_huge() {
+ batch_alloc_with_free(HUGE_BATCH);
+}
+
+static void
+item_alloc_with_free_huge() {
+ item_alloc_with_free(HUGE_BATCH);
+}
+
+TEST_BEGIN(test_huge_batch_with_free) {
+ compare_with_free(HUGE_BATCH, HUGE_BATCH_ITER,
+ batch_alloc_with_free_huge, item_alloc_with_free_huge);
+}
+TEST_END
+
+int main(void) {
+ assert_d_eq(mallctlnametomib("experimental.batch_alloc", mib, &miblen),
+ 0, "");
+ return test_no_reentrancy(
+ test_tiny_batch_without_free,
+ test_tiny_batch_with_free,
+ test_huge_batch_without_free,
+ test_huge_batch_with_free);
+}
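
batch_alloc.c benchmarks jemalloc's experimental.batch_alloc mallctl, which fills a caller-supplied pointer array with many same-sized allocations in a single call, against a plain one-allocation-at-a-time loop, both with and without interleaved frees. Below is a minimal standalone sketch of the same call pattern; the packet struct simply mirrors the batch_alloc_packet_t defined locally in this test (the experimental control is assumed to accept exactly this layout), and the sizes and counts are arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Mirrors the packet layout defined in batch_alloc.c above; assumed to match
 * what the experimental.batch_alloc control expects. */
typedef struct {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
} batch_alloc_packet_t;

int
main(void) {
	size_t mib[8];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("experimental.batch_alloc", mib, &miblen) != 0) {
		fprintf(stderr, "experimental.batch_alloc not available\n");
		return 1;
	}

	void *ptrs[128];
	batch_alloc_packet_t packet = {ptrs, 128, 64, 0};
	size_t filled;
	size_t len = sizeof(filled);
	if (mallctlbymib(mib, miblen, &filled, &len, &packet,
	    sizeof(packet)) != 0) {
		return 1;
	}
	printf("batch-allocated %zu of 128 requested objects\n", filled);

	/* Sized deallocation, as in release_and_clear() above. */
	for (size_t i = 0; i < filled; i++) {
		sdallocx(ptrs[i], 64, 0);
	}
	return 0;
}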
diff --git a/deps/jemalloc/test/stress/fill_flush.c b/deps/jemalloc/test/stress/fill_flush.c
new file mode 100644
index 0000000..a2db044
--- /dev/null
+++ b/deps/jemalloc/test/stress/fill_flush.c
@@ -0,0 +1,76 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+#define SMALL_ALLOC_SIZE 128
+#define LARGE_ALLOC_SIZE SC_LARGE_MINCLASS
+#define NALLOCS 1000
+
+/*
+ * We make this volatile so the 1-at-a-time variants can't leave the allocation
+ * in a register, just to try to get the cache behavior closer.
+ */
+void *volatile allocs[NALLOCS];
+
+static void
+array_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_small) {
+ compare_funcs(1 * 1000, 10 * 1000,
+ "array of small allocations", array_alloc_dalloc_small,
+ "small item allocation", item_alloc_dalloc_small);
+}
+TEST_END
+
+static void
+array_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_large) {
+ compare_funcs(100, 1000,
+ "array of large allocations", array_alloc_dalloc_large,
+ "large item allocation", item_alloc_dalloc_large);
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_array_vs_item_small,
+ test_array_vs_item_large);
+}
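
fill_flush.c compares a fill-then-flush pattern (allocate NALLOCS objects, then free them all) with allocating and freeing one object at a time; the array variant exercises tcache fills and flushes in bulk, while the item variant keeps reusing a single cache slot. The timing comes from compare_funcs in test/bench.h, which is not part of this diff; the sketch below is a rough standalone approximation using clock_gettime, with iteration counts chosen arbitrarily.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <jemalloc/jemalloc.h>

#define NALLOCS 1000
#define ALLOC_SIZE 128

static void *volatile allocs[NALLOCS];

static uint64_t
now_ns(void) {
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000u + (uint64_t)ts.tv_nsec;
}

static void
array_pattern(void) {
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = mallocx(ALLOC_SIZE, 0);
	}
	for (int i = 0; i < NALLOCS; i++) {
		sdallocx(allocs[i], ALLOC_SIZE, 0);
	}
}

static void
item_pattern(void) {
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = mallocx(ALLOC_SIZE, 0);
		sdallocx(allocs[i], ALLOC_SIZE, 0);
	}
}

int
main(void) {
	int iters = 10 * 1000; /* arbitrary; compare_funcs also does a warmup pass */
	uint64_t t0 = now_ns();
	for (int i = 0; i < iters; i++) array_pattern();
	uint64_t t1 = now_ns();
	for (int i = 0; i < iters; i++) item_pattern();
	uint64_t t2 = now_ns();
	printf("array: %.1f ms, item: %.1f ms\n",
	    (t1 - t0) / 1e6, (t2 - t1) / 1e6);
	return 0;
}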
diff --git a/deps/jemalloc/test/stress/hookbench.c b/deps/jemalloc/test/stress/hookbench.c
new file mode 100644
index 0000000..97e90b0
--- /dev/null
+++ b/deps/jemalloc/test/stress/hookbench.c
@@ -0,0 +1,73 @@
+#include "test/jemalloc_test.h"
+
+static void
+noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
+ uintptr_t result_raw, uintptr_t args_raw[3]) {
+}
+
+static void
+noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
+ uintptr_t args_raw[3]) {
+}
+
+static void
+noop_expand_hook(void *extra, hook_expand_t type, void *address,
+ size_t old_usize, size_t new_usize, uintptr_t result_raw,
+ uintptr_t args_raw[4]) {
+}
+
+static void
+malloc_free_loop(int iters) {
+ for (int i = 0; i < iters; i++) {
+ void *p = mallocx(1, 0);
+ free(p);
+ }
+}
+
+static void
+test_hooked(int iters) {
+ hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook,
+ NULL};
+
+ int err;
+ void *handles[HOOK_MAX];
+ size_t sz = sizeof(handles[0]);
+
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.install", &handles[i],
+ &sz, &hooks, sizeof(hooks));
+ assert(err == 0);
+
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+ malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1,
+ i + 1 == 1 ? "" : "s", timer_usec(&timer));
+ }
+ for (int i = 0; i < HOOK_MAX; i++) {
+ err = mallctl("experimental.hooks.remove", NULL, NULL,
+ &handles[i], sizeof(handles[i]));
+ assert(err == 0);
+ }
+}
+
+static void
+test_unhooked(int iters) {
+ timedelta_t timer;
+ timer_start(&timer);
+ malloc_free_loop(iters);
+ timer_stop(&timer);
+
+ malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer));
+}
+
+int
+main(void) {
+ /* Initialize */
+ free(mallocx(1, 0));
+ int iters = 10 * 1000 * 1000;
+ malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
+ test_hooked(iters);
+ test_unhooked(iters);
+}
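
hookbench.c measures how much each installed hook set slows down a malloc/free pair, installing up to HOOK_MAX sets of no-op hooks and re-timing the same loop after each install. The sketch below uses the same experimental.hooks.install / experimental.hooks.remove controls to install one hook set that counts allocations; like the benchmark, it relies on the test harness header for the hook types (they are not part of the stable public API), and the counter itself is an illustrative assumption.

#include "test/jemalloc_test.h"

static size_t alloc_count = 0;

static void
counting_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	/* Not thread-safe; good enough for a single-threaded sketch. */
	alloc_count++;
}

static void
noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
}

static void
noop_expand_hook(void *extra, hook_expand_t type, void *address,
    size_t old_usize, size_t new_usize, uintptr_t result_raw,
    uintptr_t args_raw[4]) {
}

int
main(void) {
	hooks_t hooks = {&counting_alloc_hook, &noop_dalloc_hook,
	    &noop_expand_hook, NULL};
	void *handle;
	size_t sz = sizeof(handle);
	int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks));
	assert(err == 0);

	for (int i = 0; i < 1000; i++) {
		free(mallocx(1, 0));
	}

	err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
	    sizeof(handle));
	assert(err == 0);
	malloc_printf("hook saw %zu allocations\n", alloc_count);
	return 0;
}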
diff --git a/deps/jemalloc/test/stress/large_microbench.c b/deps/jemalloc/test/stress/large_microbench.c
new file mode 100644
index 0000000..c66b33a
--- /dev/null
+++ b/deps/jemalloc/test/stress/large_microbench.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+large_mallocx_free(void) {
+ /*
+ * We go a bit larger than the large minclass on its own to better
+ * expose costs from things like zeroing.
+ */
+ void *p = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ free(p);
+}
+
+static void
+small_mallocx_free(void) {
+ void *p = mallocx(16, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ free(p);
+}
+
+TEST_BEGIN(test_large_vs_small) {
+ compare_funcs(100*1000, 1*1000*1000, "large mallocx",
+ large_mallocx_free, "small mallocx", small_mallocx_free);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_large_vs_small);
+}
+
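large_microbench.c contrasts a SC_LARGE_MINCLASS allocation made with MALLOCX_TCACHE_NONE, which cannot be served from the thread cache, against a 16-byte allocation on the tcache fast path. A short hedged sketch of that flag in ordinary application code follows; the 16 KiB size is an arbitrary choice that, under default tcache settings, would otherwise be eligible for the thread cache (SC_LARGE_MINCLASS itself comes from jemalloc's internal headers, not the public API).

#include <jemalloc/jemalloc.h>

/* Allocate and free a one-off buffer while bypassing the thread cache on
 * both paths, so the buffer does not displace per-thread cache state.
 * 16 KiB is an arbitrary size, assumed to be tcache-eligible by default. */
void
one_off_buffer(void) {
	void *p = mallocx(16 * 1024, MALLOCX_TCACHE_NONE);
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE_NONE);
	}
}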
diff --git a/deps/jemalloc/test/stress/mallctl.c b/deps/jemalloc/test/stress/mallctl.c
new file mode 100644
index 0000000..d29b311
--- /dev/null
+++ b/deps/jemalloc/test/stress/mallctl.c
@@ -0,0 +1,74 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+mallctl_short(void) {
+ const char *version;
+ size_t sz = sizeof(version);
+ int err = mallctl("version", &version, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctl failure");
+}
+
+size_t mib_short[1];
+
+static void
+mallctlbymib_short(void) {
+ size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
+ const char *version;
+ size_t sz = sizeof(version);
+ int err = mallctlbymib(mib_short, miblen, &version, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctlbymib failure");
+}
+
+TEST_BEGIN(test_mallctl_vs_mallctlbymib_short) {
+ size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
+
+ int err = mallctlnametomib("version", mib_short, &miblen);
+ assert_d_eq(err, 0, "mallctlnametomib failure");
+ compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_short",
+ mallctl_short, "mallctlbymib_short", mallctlbymib_short);
+}
+TEST_END
+
+static void
+mallctl_long(void) {
+ uint64_t nmalloc;
+ size_t sz = sizeof(nmalloc);
+ int err = mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL,
+ 0);
+ assert_d_eq(err, 0, "mallctl failure");
+}
+
+size_t mib_long[6];
+
+static void
+mallctlbymib_long(void) {
+ size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
+ uint64_t nmalloc;
+ size_t sz = sizeof(nmalloc);
+ int err = mallctlbymib(mib_long, miblen, &nmalloc, &sz, NULL, 0);
+ assert_d_eq(err, 0, "mallctlbymib failure");
+}
+
+TEST_BEGIN(test_mallctl_vs_mallctlbymib_long) {
+ /*
+ * We want to use the longest mallctl we have; that needs stats support
+ * to be allowed.
+ */
+ test_skip_if(!config_stats);
+
+ size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
+ int err = mallctlnametomib("stats.arenas.0.bins.0.nmalloc", mib_long,
+ &miblen);
+ assert_d_eq(err, 0, "mallctlnametomib failure");
+ compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_long",
+ mallctl_long, "mallctlbymib_long", mallctlbymib_long);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_mallctl_vs_mallctlbymib_short,
+ test_mallctl_vs_mallctlbymib_long);
+}
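
mallctl.c measures the cost of looking a control name up on every call (mallctl) versus translating the name to a MIB once with mallctlnametomib and then issuing mallctlbymib, for both a short name ("version") and a long one ("stats.arenas.0.bins.0.nmalloc"). A minimal sketch of that MIB-caching pattern in application code, polling stats.allocated, follows; the epoch bump is the documented way to refresh jemalloc's statistics snapshot.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Resolve the control name once... */
	size_t mib[2];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.allocated", mib, &miblen) != 0) {
		return 1; /* statistics not enabled in this build */
	}

	for (int i = 0; i < 5; i++) {
		free(malloc(4096)); /* do a little work between samples */

		/* ...refresh the stats snapshot by bumping the epoch... */
		uint64_t epoch = 1;
		size_t esz = sizeof(epoch);
		mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));

		/* ...and read it repeatedly through the cached MIB. */
		size_t allocated;
		size_t sz = sizeof(allocated);
		if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0) {
			printf("allocated: %zu bytes\n", allocated);
		}
	}
	return 0;
}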
diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c
new file mode 100644
index 0000000..062e32f
--- /dev/null
+++ b/deps/jemalloc/test/stress/microbench.c
@@ -0,0 +1,126 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+static void
+malloc_free(void) {
+ /* The compiler can optimize away free(malloc(1))! */
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ free(p);
+}
+
+static void
+mallocx_free(void) {
+ void *p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_malloc_vs_mallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
+ malloc_free, "mallocx", mallocx_free);
+}
+TEST_END
+
+static void
+malloc_dallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ dallocx(p, 0);
+}
+
+static void
+malloc_sdallocx(void) {
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ sdallocx(p, 1, 0);
+}
+
+TEST_BEGIN(test_free_vs_dallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
+ "dallocx", malloc_dallocx);
+}
+TEST_END
+
+TEST_BEGIN(test_dallocx_vs_sdallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
+ "sdallocx", malloc_sdallocx);
+}
+TEST_END
+
+static void
+malloc_mus_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ TEST_MALLOC_SIZE(p);
+ free(p);
+}
+
+static void
+malloc_sallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (sallocx(p, 0) < 1) {
+ test_fail("Unexpected sallocx() failure");
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_mus_vs_sallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
+ malloc_mus_free, "sallocx", malloc_sallocx_free);
+}
+TEST_END
+
+static void
+malloc_nallocx_free(void) {
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (nallocx(1, 0) < 1) {
+ test_fail("Unexpected nallocx() failure");
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_sallocx_vs_nallocx) {
+ compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
+ malloc_sallocx_free, "nallocx", malloc_nallocx_free);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_malloc_vs_mallocx,
+ test_free_vs_dallocx,
+ test_dallocx_vs_sdallocx,
+ test_mus_vs_sallocx,
+ test_sallocx_vs_nallocx);
+}
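
microbench.c pairs each extended entry point with its standard counterpart: mallocx against malloc, dallocx and sdallocx against free, sallocx against malloc_usable_size, and nallocx, which reports the size a request would round up to without allocating. The sdallocx comparison is the one most relevant to application code: passing the known size at free time lets the allocator skip looking the size up. A small hedged sketch of that idiom with an arbitrary node type:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* A hypothetical fixed-size node; since the size is known statically, frees
 * can use sized deallocation and avoid the allocator's size lookup. */
typedef struct node_s {
	struct node_s *next;
	int value;
} node_t;

static node_t *
node_new(int value) {
	node_t *n = mallocx(sizeof(node_t), MALLOCX_ZERO);
	if (n != NULL) {
		n->value = value;
	}
	return n;
}

static void
node_delete(node_t *n) {
	if (n != NULL) {
		sdallocx(n, sizeof(node_t), 0);
	}
}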