author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:08:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:08:18 +0000
commit    5da14042f70711ea5cf66e034699730335462f66 (patch)
tree      0f6354ccac934ed87a2d555f45be4c831cf92f4a /src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c
parent    Releasing debian version 1.44.3-2. (diff)
download  netdata-5da14042f70711ea5cf66e034699730335462f66.tar.xz
          netdata-5da14042f70711ea5cf66e034699730335462f66.zip
Merging upstream version 1.45.3+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c')
-rw-r--r--  src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c  76
1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c b/src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c
new file mode 100644
index 000000000..a2db044dd
--- /dev/null
+++ b/src/fluent-bit/lib/jemalloc-5.3.0/test/stress/fill_flush.c
@@ -0,0 +1,76 @@
+#include "test/jemalloc_test.h"
+#include "test/bench.h"
+
+#define SMALL_ALLOC_SIZE 128
+#define LARGE_ALLOC_SIZE SC_LARGE_MINCLASS
+#define NALLOCS 1000
+
+/*
+ * We make this volatile so the 1-at-a-time variants can't keep the allocation
+ * in a register, to bring their cache behavior closer to the array variants'.
+ */
+void *volatile allocs[NALLOCS];
+
+static void
+array_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_small(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(SMALL_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_small) {
+ compare_funcs(1 * 1000, 10 * 1000,
+ "array of small allocations", array_alloc_dalloc_small,
+ "small item allocation", item_alloc_dalloc_small);
+}
+TEST_END
+
+static void
+array_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ }
+ for (int i = 0; i < NALLOCS; i++) {
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+static void
+item_alloc_dalloc_large(void) {
+ for (int i = 0; i < NALLOCS; i++) {
+ void *p = mallocx(LARGE_ALLOC_SIZE, 0);
+ assert_ptr_not_null(p, "mallocx shouldn't fail");
+ allocs[i] = p;
+ sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
+ }
+}
+
+TEST_BEGIN(test_array_vs_item_large) {
+ compare_funcs(100, 1000,
+ "array of large allocations", array_alloc_dalloc_large,
+ "large item allocation", item_alloc_dalloc_large);
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_array_vs_item_small,
+ test_array_vs_item_large);
+}
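
For reference, compare_funcs() comes from the test/bench.h header included at the top of this file; it is expected to warm each callback up, time a fixed number of iterations of each, and report how the two variants compare. The standalone sketch below mirrors that warm-up/measure/compare pattern under assumed names (now_ns, compare_funcs_sketch, and the toy variant_a/variant_b workloads are illustrative, not jemalloc's actual API):

    /*
     * Illustrative sketch only -- not jemalloc's bench.h. It reproduces the
     * warm-up/measure/compare pattern that compare_funcs() is used for above.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Monotonic clock reading in nanoseconds. */
    static uint64_t
    now_ns(void) {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* Run func() nwarmup times untimed, then time niter calls. */
    static uint64_t
    time_func(uint64_t nwarmup, uint64_t niter, void (*func)(void)) {
            for (uint64_t i = 0; i < nwarmup; i++) {
                    func();
            }
            uint64_t begin = now_ns();
            for (uint64_t i = 0; i < niter; i++) {
                    func();
            }
            return now_ns() - begin;
    }

    /* Time two variants back to back and print ns/iter plus their ratio. */
    static void
    compare_funcs_sketch(uint64_t nwarmup, uint64_t niter,
        const char *name_a, void (*func_a)(void),
        const char *name_b, void (*func_b)(void)) {
            uint64_t ns_a = time_func(nwarmup, niter, func_a);
            uint64_t ns_b = time_func(nwarmup, niter, func_b);
            printf("%s: %.1f ns/iter, %s: %.1f ns/iter, ratio %.2fx\n",
                name_a, (double)ns_a / (double)niter,
                name_b, (double)ns_b / (double)niter,
                (double)ns_a / (double)ns_b);
    }

    /* Toy workloads; the volatile sink keeps the compiler from eliding them. */
    static void *volatile sink;
    static void variant_a(void) { sink = malloc(128); free(sink); }
    static void variant_b(void) { sink = calloc(1, 128); free(sink); }

    int
    main(void) {
            compare_funcs_sketch(1 * 1000, 10 * 1000,
                "malloc 128", variant_a, "calloc 128", variant_b);
            return 0;
    }

Build with e.g. cc -O2 sketch.c; the printed ratio is the analogue of the array-vs-item comparison this stress test reports.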