path: root/fluent-bit/lib/jemalloc-5.3.0/include
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:23 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:44 +0000
commit     836b47cb7e99a977c5a23b059ca1d0b5065d310e
tree       1604da8f482d02effa033c94a84be42bc0c848c3 /fluent-bit/lib/jemalloc-5.3.0/include
parent     Releasing debian version 1.44.3-2.
Merging upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fluent-bit/lib/jemalloc-5.3.0/include')
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/activity_callback.h  23
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_externs.h  121
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_a.h  24
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_b.h  550
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_stats.h  114
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_structs.h  101
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_types.h  58
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/assert.h  56
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic.h  107
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_c11.h  97
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_atomic.h  129
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_sync.h  195
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_msvc.h  158
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_externs.h  33
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_inlines.h  48
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_structs.h  66
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/base.h  110
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin.h  82
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_info.h  50
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_stats.h  57
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_types.h  17
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bit_util.h  422
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bitmap.h  368
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/buf_writer.h  32
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/cache_bin.h  670
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ckh.h  101
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/counter.h  34
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ctl.h  159
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/decay.h  186
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/div.h  41
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ecache.h  55
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata.h  698
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata_cache.h  49
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ehooks.h  412
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emap.h  357
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emitter.h  510
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/eset.h  77
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/exp_grow.h  50
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent.h  137
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_dss.h  26
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_mmap.h  10
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fb.h  373
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fxp.h  126
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hash.h  320
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hook.h  163
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa.h  182
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_hooks.h  17
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_opts.h  74
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpdata.h  413
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/inspect.h  40
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_decls.h  108
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_defs.h.in  427
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_externs.h  75
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_includes.h  84
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_a.h  122
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_b.h  103
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_c.h  340
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_macros.h  111
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_types.h  130
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_preamble.h.in  263
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/large_externs.h  24
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/lockedint.h  204
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/log.h  115
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/malloc_io.h  105
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mpsc_queue.h  134
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex.h  319
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex_prof.h  117
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/nstime.h  73
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pa.h  243
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pac.h  179
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pages.h  119
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pai.h  95
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak.h  37
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak_event.h  24
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ph.h  520
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_namespace.sh  5
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_symbols.sh  51
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prng.h  168
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_data.h  37
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_externs.h  95
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_hook.h  21
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_inlines.h  261
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_log.h  22
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_recent.h  23
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_stats.h  17
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_structs.h  221
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_sys.h  30
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_types.h  75
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/psset.h  131
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_namespace.sh  6
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_unnamespace.sh  6
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ql.h  197
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/qr.h  140
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/quantum.h  87
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rb.h  1856
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree.h  554
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree_tsd.h  62
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/safety_check.h  31
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san.h  191
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san_bump.h  52
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sc.h  357
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec.h  120
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec_opts.h  59
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/seq.h  55
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/slab_data.h  12
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.h  232
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.sh  101
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/spin.h  40
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/stats.h  54
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sz.h  371
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_externs.h  75
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_inlines.h  193
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_structs.h  68
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_types.h  35
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/test_hooks.h  24
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/thread_event.h  301
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ticker.h  175
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd.h  518
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_generic.h  182
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_malloc_thread_cleanup.h  61
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_tls.h  60
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_types.h  10
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_win.h  139
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/typed_list.h  55
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/util.h  123
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/witness.h  378
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc.sh  27
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_defs.h.in  54
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_macros.h.in  149
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_mangle.sh  45
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_protos.h.in  71
-rwxr-xr-x  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_rename.sh  22
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_typedefs.h.in  77
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdbool.h  20
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdint.h  247
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/strings.h  58
-rw-r--r--  fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/windows_extra.h  6
137 files changed, 0 insertions, 21282 deletions
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/activity_callback.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/activity_callback.h
deleted file mode 100644
index 6c2e84e3..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/activity_callback.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
-#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
-
-/*
- * The callback to be executed "periodically", in response to some amount of
- * allocator activity.
- *
- * This callback need not be computing any sort of peak (although that's the
- * intended first use case), but we drive it from the peak counter, so it
- * keeps things tidy to keep it here.
- *
- * The calls to this thunk get driven by the peak_event module.
- */
-#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
-typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
- uint64_t deallocated);
-typedef struct activity_callback_thunk_s activity_callback_thunk_t;
-struct activity_callback_thunk_s {
- activity_callback_t callback;
- void *uctx;
-};
-
-#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
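A minimal stand-alone sketch of how a consumer of this hook might look; the peak-tracking callback, the driver in main(), and the local copies of the typedefs are illustrative stand-ins, not jemalloc code (in jemalloc the calls are driven by the peak_event module):

#include <stdint.h>
#include <stdio.h>

/* Local mirror of the deleted header: a callback plus an opaque user context. */
typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
    uint64_t deallocated);
typedef struct {
    activity_callback_t callback;
    void *uctx;
} activity_callback_thunk_t;

/* Hypothetical consumer: track the high-water mark of net allocation. */
static void
my_peak_cb(void *uctx, uint64_t allocated, uint64_t deallocated) {
    uint64_t *peak = uctx;
    uint64_t net = allocated > deallocated ? allocated - deallocated : 0;
    if (net > *peak) {
        *peak = net;
    }
}

int
main(void) {
    uint64_t peak = 0;
    activity_callback_thunk_t thunk = {my_peak_cb, &peak};
    /* In jemalloc, peak_event would invoke the thunk periodically. */
    thunk.callback(thunk.uctx, 1 << 20, 1 << 18);
    thunk.callback(thunk.uctx, 5 << 20, 1 << 20);
    printf("peak net allocation seen: %llu bytes\n", (unsigned long long)peak);
    return 0;
}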
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_externs.h
deleted file mode 100644
index e6fceaaf..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_externs.h
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
-
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/div.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/hook.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/stats.h"
-
-/*
- * When the amount of pages to be purged exceeds this amount, deferred purge
- * should happen.
- */
-#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
-
-extern ssize_t opt_dirty_decay_ms;
-extern ssize_t opt_muzzy_decay_ms;
-
-extern percpu_arena_mode_t opt_percpu_arena;
-extern const char *percpu_arena_mode_names[];
-
-extern div_info_t arena_binind_div_info[SC_NBINS];
-
-extern malloc_mutex_t arenas_lock;
-extern emap_t arena_emap_global;
-
-extern size_t opt_oversize_threshold;
-extern size_t oversize_threshold;
-
-/*
- * arena_bin_offsets[binind] is the offset of the first bin shard for size class
- * binind.
- */
-extern uint32_t arena_bin_offsets[SC_NBINS];
-
-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
- unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
- ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
- size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_data_t *bstats, arena_stats_large_t *lstats,
- pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
-void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
-edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
- size_t usize, size_t alignment, bool zero);
-void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
- edata_t *edata);
-void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
- edata_t *edata, size_t oldsize);
-void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
- edata_t *edata, size_t oldsize);
-bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
- ssize_t decay_ms);
-ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
-void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
- bool all);
-uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
-void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
-void arena_reset(tsd_t *tsd, arena_t *arena);
-void arena_destroy(tsd_t *tsd, arena_t *arena);
-void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
- cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
- const unsigned nfill);
-
-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t ind, bool zero);
-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
-void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- bool slow_path);
-void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
-
-void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
- edata_t *slab, bin_t *bin);
-void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
- edata_t *slab, bin_t *bin);
-void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero, size_t *newsize);
-void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-ehooks_t *arena_get_ehooks(arena_t *arena);
-extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
- extent_hooks_t *extent_hooks);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
-ssize_t arena_dirty_decay_ms_default_get(void);
-bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
-ssize_t arena_muzzy_decay_ms_default_get(void);
-bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
-bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
- size_t *old_limit, size_t *new_limit);
-unsigned arena_nthreads_get(arena_t *arena, bool internal);
-void arena_nthreads_inc(arena_t *arena, bool internal);
-void arena_nthreads_dec(arena_t *arena, bool internal);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
-bool arena_init_huge(void);
-bool arena_is_huge(unsigned arena_ind);
-arena_t *arena_choose_huge(tsd_t *tsd);
-bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- unsigned *binshard);
-size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- void **ptrs, size_t nfill, bool zero);
-bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
-
-#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
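Several of these externs, the decay settings in particular, surface through jemalloc's public mallctl interface rather than being called directly. A hedged sketch of reading and adjusting decay times that way, assuming a build that exposes the unprefixed mallctl symbol (some configurations mangle it, e.g. to je_mallctl):

#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    ssize_t decay_ms;
    size_t len = sizeof(decay_ms);

    /* Read the current default dirty-page decay time, in milliseconds. */
    if (mallctl("arenas.dirty_decay_ms", &decay_ms, &len, NULL, 0) == 0) {
        printf("default dirty_decay_ms: %zd\n", decay_ms);
    }

    /* Lower arena 0's dirty decay time to one second. */
    ssize_t new_decay = 1000;
    if (mallctl("arena.0.dirty_decay_ms", NULL, NULL, &new_decay,
        sizeof(new_decay)) != 0) {
        fprintf(stderr, "failed to set arena.0.dirty_decay_ms\n");
    }
    return 0;
}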
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_a.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_a.h
deleted file mode 100644
index 8568358c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_a.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
-
-static inline unsigned
-arena_ind_get(const arena_t *arena) {
- return arena->ind;
-}
-
-static inline void
-arena_internal_add(arena_t *arena, size_t size) {
- atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
-}
-
-static inline void
-arena_internal_sub(arena_t *arena, size_t size) {
- atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
-}
-
-static inline size_t
-arena_internal_get(arena_t *arena) {
- return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_b.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_b.h
deleted file mode 100644
index fa81537c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_inlines_b.h
+++ /dev/null
@@ -1,550 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
-
-#include "jemalloc/internal/div.h"
-#include "jemalloc/internal/emap.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
-
-static inline arena_t *
-arena_get_from_edata(edata_t *edata) {
- return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
- ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
- if (arena != NULL) {
- return arena;
- }
-
- /*
- * For huge allocations, use the dedicated huge arena if both are true:
- * 1) the thread is using auto arena selection (i.e. arena == NULL), and 2) the
- * thread is not assigned to a manual arena.
- */
- if (unlikely(size >= oversize_threshold)) {
- arena_t *tsd_arena = tsd_arena_get(tsd);
- if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
- return arena_choose_huge(tsd);
- }
- }
-
- return arena_choose(tsd, NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
- prof_info_t *prof_info, bool reset_recent) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(prof_info != NULL);
-
- edata_t *edata = NULL;
- bool is_slab;
-
- /* Static check. */
- if (alloc_ctx == NULL) {
- edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
- ptr);
- is_slab = edata_slab_get(edata);
- } else if (unlikely(!(is_slab = alloc_ctx->slab))) {
- edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
- ptr);
- }
-
- if (unlikely(!is_slab)) {
- /* edata must have been initialized at this point. */
- assert(edata != NULL);
- large_prof_info_get(tsd, edata, prof_info, reset_recent);
- } else {
- prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
- /*
- * No need to set other fields in prof_info; they will never be
- * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
- */
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
- emap_alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- /* Static check. */
- if (alloc_ctx == NULL) {
- edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
- &arena_emap_global, ptr);
- if (unlikely(!edata_slab_get(edata))) {
- large_prof_tctx_reset(edata);
- }
- } else {
- if (unlikely(!alloc_ctx->slab)) {
- edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
- &arena_emap_global, ptr);
- large_prof_tctx_reset(edata);
- }
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
- ptr);
- assert(!edata_slab_get(edata));
-
- large_prof_tctx_reset(edata);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
- size_t size) {
- cassert(config_prof);
-
- assert(!edata_slab_get(edata));
- large_prof_info_set(edata, tctx, size);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
- if (unlikely(tsdn_null(tsdn))) {
- return;
- }
- tsd_t *tsd = tsdn_tsd(tsdn);
- /*
- * We use the ticker_geom_t to avoid having per-arena state in the tsd.
- * Instead of having a countdown-until-decay timer running for every
- * arena in every thread, we flip a coin once per tick, whose
- * probability of coming up heads is 1/nticks; this is effectively the
- * operation of the ticker_geom_t. Each arena has the same chance of a
- * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
- * use a single ticker for all of them.
- */
- ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
- uint64_t *prng_state = tsd_prng_statep_get(tsd);
- if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
- arena_decay(tsdn, arena, false, false);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
- arena_decay_ticks(tsdn, arena, 1);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
-
- if (likely(tcache != NULL)) {
- if (likely(size <= SC_SMALL_MAXCLASS)) {
- return tcache_alloc_small(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path);
- }
- if (likely(size <= tcache_maxclass)) {
- return tcache_alloc_large(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path);
- }
- /* (size > tcache_maxclass) case falls through. */
- assert(size > tcache_maxclass);
- }
-
- return arena_malloc_hard(tsdn, arena, size, ind, zero);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(tsdn_t *tsdn, const void *ptr) {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
- unsigned arena_ind = edata_arena_ind_get(edata);
- return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
- emap_alloc_ctx_t alloc_ctx;
- emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
- assert(alloc_ctx.szind != SC_NSIZES);
-
- return sz_index2size(alloc_ctx.szind);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
- /*
- * Return 0 if ptr is not within an extent managed by jemalloc. This
- * function has two extra costs relative to isalloc():
- * - The rtree calls cannot claim to be dependent lookups, which induces
- * rtree lookup load dependencies.
- * - The lookup may fail, so there is an extra branch to check for
- * failure.
- */
-
- emap_full_alloc_ctx_t full_alloc_ctx;
- bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
- ptr, &full_alloc_ctx);
- if (missing) {
- return 0;
- }
-
- if (full_alloc_ctx.edata == NULL) {
- return 0;
- }
- assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
- /* Only slab members should be looked up via interior pointers. */
- assert(edata_addr_get(full_alloc_ctx.edata) == ptr
- || edata_slab_get(full_alloc_ctx.edata));
-
- assert(full_alloc_ctx.szind != SC_NSIZES);
-
- return sz_index2size(full_alloc_ctx.szind);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
- if (!config_opt_safety_checks) {
- return false;
- }
-
- /*
- * Eagerly detect double free and sized dealloc bugs for large sizes.
- * The cost is low enough (as edata will be accessed anyway) to be
- * enabled all the time.
- */
- if (unlikely(edata == NULL ||
- edata_state_get(edata) != extent_state_active)) {
- safety_check_fail("Invalid deallocation detected: "
- "pages being freed (%p) not currently active, "
- "possibly caused by double free bugs.",
- (uintptr_t)edata_addr_get(edata));
- return true;
- }
- size_t input_size = sz_index2size(szind);
- if (unlikely(input_size != edata_usize_get(edata))) {
- safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
- /* true_size */ edata_usize_get(edata), input_size);
- return true;
- }
-
- return false;
-}
-
-static inline void
-arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
- if (config_prof && unlikely(szind < SC_NBINS)) {
- arena_dalloc_promoted(tsdn, ptr, NULL, true);
- } else {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
- ptr);
- if (large_dalloc_safety_checks(edata, ptr, szind)) {
- /* See the comment in isfree. */
- return;
- }
- large_dalloc(tsdn, edata);
- }
-}
-
-static inline void
-arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
- assert(ptr != NULL);
-
- emap_alloc_ctx_t alloc_ctx;
- emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
-
- if (config_debug) {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
- ptr);
- assert(alloc_ctx.szind == edata_szind_get(edata));
- assert(alloc_ctx.szind < SC_NSIZES);
- assert(alloc_ctx.slab == edata_slab_get(edata));
- }
-
- if (likely(alloc_ctx.slab)) {
- /* Small allocation. */
- arena_dalloc_small(tsdn, ptr);
- } else {
- arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
- bool slow_path) {
- if (szind < nhbins) {
- if (config_prof && unlikely(szind < SC_NBINS)) {
- arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
- } else {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
- }
- } else {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
- ptr);
- if (large_dalloc_safety_checks(edata, ptr, szind)) {
- /* See the comment in isfree. */
- return;
- }
- large_dalloc(tsdn, edata);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(ptr != NULL);
-
- if (unlikely(tcache == NULL)) {
- arena_dalloc_no_tcache(tsdn, ptr);
- return;
- }
-
- emap_alloc_ctx_t alloc_ctx;
- if (caller_alloc_ctx != NULL) {
- alloc_ctx = *caller_alloc_ctx;
- } else {
- util_assume(!tsdn_null(tsdn));
- emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
- &alloc_ctx);
- }
-
- if (config_debug) {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
- ptr);
- assert(alloc_ctx.szind == edata_szind_get(edata));
- assert(alloc_ctx.szind < SC_NSIZES);
- assert(alloc_ctx.slab == edata_slab_get(edata));
- }
-
- if (likely(alloc_ctx.slab)) {
- /* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- alloc_ctx.szind, slow_path);
- } else {
- arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
- slow_path);
- }
-}
-
-static inline void
-arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
- assert(ptr != NULL);
- assert(size <= SC_LARGE_MAXCLASS);
-
- emap_alloc_ctx_t alloc_ctx;
- if (!config_prof || !opt_prof) {
- /*
- * There is no risk of being confused by a promoted sampled
- * object, so base szind and slab on the given size.
- */
- alloc_ctx.szind = sz_size2index(size);
- alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
- }
-
- if ((config_prof && opt_prof) || config_debug) {
- emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
- &alloc_ctx);
-
- assert(alloc_ctx.szind == sz_size2index(size));
- assert((config_prof && opt_prof)
- || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
-
- if (config_debug) {
- edata_t *edata = emap_edata_lookup(tsdn,
- &arena_emap_global, ptr);
- assert(alloc_ctx.szind == edata_szind_get(edata));
- assert(alloc_ctx.slab == edata_slab_get(edata));
- }
- }
-
- if (likely(alloc_ctx.slab)) {
- /* Small allocation. */
- arena_dalloc_small(tsdn, ptr);
- } else {
- arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
- assert(!tsdn_null(tsdn) || tcache == NULL);
- assert(ptr != NULL);
- assert(size <= SC_LARGE_MAXCLASS);
-
- if (unlikely(tcache == NULL)) {
- arena_sdalloc_no_tcache(tsdn, ptr, size);
- return;
- }
-
- emap_alloc_ctx_t alloc_ctx;
- if (config_prof && opt_prof) {
- if (caller_alloc_ctx == NULL) {
- /* Uncommon case and should be a static check. */
- emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
- &alloc_ctx);
- assert(alloc_ctx.szind == sz_size2index(size));
- } else {
- alloc_ctx = *caller_alloc_ctx;
- }
- } else {
- /*
- * There is no risk of being confused by a promoted sampled
- * object, so base szind and slab on the given size.
- */
- alloc_ctx.szind = sz_size2index(size);
- alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
- }
-
- if (config_debug) {
- edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
- ptr);
- assert(alloc_ctx.szind == edata_szind_get(edata));
- assert(alloc_ctx.slab == edata_slab_get(edata));
- }
-
- if (likely(alloc_ctx.slab)) {
- /* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- alloc_ctx.szind, slow_path);
- } else {
- arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
- slow_path);
- }
-}
-
-static inline void
-arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
- size_t alignment) {
- assert(edata_base_get(edata) == edata_addr_get(edata));
-
- if (alignment < PAGE) {
- unsigned lg_range = LG_PAGE -
- lg_floor(CACHELINE_CEILING(alignment));
- size_t r;
- if (!tsdn_null(tsdn)) {
- tsd_t *tsd = tsdn_tsd(tsdn);
- r = (size_t)prng_lg_range_u64(
- tsd_prng_statep_get(tsd), lg_range);
- } else {
- uint64_t stack_value = (uint64_t)(uintptr_t)&r;
- r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
- }
- uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
- lg_range);
- edata->e_addr = (void *)((uintptr_t)edata->e_addr +
- random_offset);
- assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
- edata->e_addr);
- }
-}
-
-/*
- * The dalloc bin info contains just the information that the common paths need
- * during tcache flushes. By force-inlining these paths, and using local copies
- * of data (so that the compiler knows it's constant), we avoid a whole bunch of
- * redundant loads and stores by leaving this information in registers.
- */
-typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
-struct arena_dalloc_bin_locked_info_s {
- div_info_t div_info;
- uint32_t nregs;
- uint64_t ndalloc;
-};
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
- edata_t *slab, const void *ptr) {
- size_t diff, regind;
-
- /* Freeing a pointer outside the slab can cause assertion failure. */
- assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
- assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
- (uintptr_t)bin_infos[binind].reg_size == 0);
-
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
-
- /* Avoid doing division with a variable divisor. */
- regind = div_compute(&info->div_info, diff);
-
- assert(regind < bin_infos[binind].nregs);
-
- return regind;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
- szind_t binind) {
- info->div_info = arena_binind_div_info[binind];
- info->nregs = bin_infos[binind].nregs;
- info->ndalloc = 0;
-}
-
-/*
- * Does the deallocation work associated with freeing a single pointer (a
- * "step") in between an arena_dalloc_bin_locked begin and end call.
- *
- * Returns true if arena_slab_dalloc must be called on slab. Doesn't do
- * stats updates, which happen during finish (this lets running counts get left
- * in a register).
- */
-JEMALLOC_ALWAYS_INLINE bool
-arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
- void *ptr) {
- const bin_info_t *bin_info = &bin_infos[binind];
- size_t regind = arena_slab_regind(info, binind, slab, ptr);
- slab_data_t *slab_data = edata_slab_data_get(slab);
-
- assert(edata_nfree_get(slab) < bin_info->nregs);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
- edata_nfree_inc(slab);
-
- if (config_stats) {
- info->ndalloc++;
- }
-
- unsigned nfree = edata_nfree_get(slab);
- if (nfree == bin_info->nregs) {
- arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
- bin);
- return true;
- } else if (nfree == 1 && slab != bin->slabcur) {
- arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
- bin);
- }
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- arena_dalloc_bin_locked_info_t *info) {
- if (config_stats) {
- bin->stats.ndalloc += info->ndalloc;
- assert(bin->stats.curregs >= (size_t)info->ndalloc);
- bin->stats.curregs -= (size_t)info->ndalloc;
- }
-}
-
-static inline bin_t *
-arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
- bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
- return shard0 + binshard;
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
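The comment in arena_decay_ticks() above describes replacing a per-arena countdown with a per-tick coin flip whose heads probability is 1/nticks. A self-contained sketch of that idea, using rand() as a stand-in for jemalloc's PRNG and NTICKS mirroring ARENA_DECAY_NTICKS_PER_UPDATE:

#include <stdio.h>
#include <stdlib.h>

#define NTICKS 1000

int
main(void) {
    srand(12345);
    unsigned long total_ticks = 1000000;
    unsigned long decay_passes = 0;
    for (unsigned long i = 0; i < total_ticks; i++) {
        /* Heads with probability ~1/NTICKS triggers a decay pass. */
        if (rand() % NTICKS == 0) {
            decay_passes++;
        }
    }
    /* Expect roughly total_ticks / NTICKS passes, as a countdown would give. */
    printf("decay passes: %lu (expected ~%lu)\n", decay_passes,
        total_ticks / NTICKS);
    return 0;
}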
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_stats.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_stats.h
deleted file mode 100644
index 15f1d345..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_stats.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
-#define JEMALLOC_INTERNAL_ARENA_STATS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/lockedint.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/pa.h"
-#include "jemalloc/internal/sc.h"
-
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
-typedef struct arena_stats_large_s arena_stats_large_t;
-struct arena_stats_large_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the arena.
- */
- locked_u64_t nmalloc;
- locked_u64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to this size class.
- * This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- locked_u64_t nrequests; /* Partially derived. */
- /*
- * Number of tcache fills / flushes for large (similarly, periodically
- * merged). Note that there is no large tcache batch-fill currently
- * (i.e. only fill 1 at a time); however flush may be batched.
- */
- locked_u64_t nfills; /* Partially derived. */
- locked_u64_t nflushes; /* Partially derived. */
-
- /* Current number of allocations of this size class. */
- size_t curlextents; /* Derived. */
-};
-
-/*
- * Arena stats. Note that fields marked "derived" are not directly maintained
- * within the arena code; rather their values are derived during stats merge
- * requests.
- */
-typedef struct arena_stats_s arena_stats_t;
-struct arena_stats_s {
- LOCKEDINT_MTX_DECLARE(mtx)
-
- /*
- * resident includes the base stats -- that's why it lives here and not
- * in pa_shard_stats_t.
- */
- size_t base; /* Derived. */
- size_t resident; /* Derived. */
- size_t metadata_thp; /* Derived. */
- size_t mapped; /* Derived. */
-
- atomic_zu_t internal;
-
- size_t allocated_large; /* Derived. */
- uint64_t nmalloc_large; /* Derived. */
- uint64_t ndalloc_large; /* Derived. */
- uint64_t nfills_large; /* Derived. */
- uint64_t nflushes_large; /* Derived. */
- uint64_t nrequests_large; /* Derived. */
-
- /*
- * The stats logically owned by the pa_shard in the same arena. This
- * lives here only because it's convenient for the purposes of the ctl
- * module -- it only knows about the single arena_stats.
- */
- pa_shard_stats_t pa_shard_stats;
-
- /* Number of bytes cached in tcache associated with this arena. */
- size_t tcache_bytes; /* Derived. */
- size_t tcache_stashed_bytes; /* Derived. */
-
- mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
-
- /* One element for each large size class. */
- arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
-
- /* Arena uptime. */
- nstime_t uptime;
-};
-
-static inline bool
-arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
- if (config_debug) {
- for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
- assert(((char *)arena_stats)[i] == 0);
- }
- }
- if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
- WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
- return true;
- }
- /* Memory is zeroed, so there is no need to clear stats. */
- return false;
-}
-
-static inline void
-arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
- szind_t szind, uint64_t nrequests) {
- LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
- arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
- locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
- &lstats->nrequests, nrequests);
- locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
- &lstats->nflushes, 1);
- LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
-}
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
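The "Derived" annotations above mean the arena-wide totals are not maintained on the hot path; they are recomputed from the per-size-class counters when a stats merge is requested. A simplified sketch of that merge step, with plain structs and illustrative values standing in for jemalloc's locked/atomic counter types:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for SC_NSIZES - SC_NBINS, the number of large size classes. */
#define NLARGE 4

typedef struct {
    uint64_t nmalloc;
    uint64_t ndalloc;
} large_stats_t;

int
main(void) {
    /* Authoritative per-size-class counters. */
    large_stats_t lstats[NLARGE] = {{10, 4}, {3, 1}, {7, 7}, {2, 0}};

    /* "Derived" arena-wide totals, recomputed only at merge time. */
    uint64_t nmalloc_large = 0, ndalloc_large = 0;
    for (int i = 0; i < NLARGE; i++) {
        nmalloc_large += lstats[i].nmalloc;
        ndalloc_large += lstats[i].ndalloc;
    }
    printf("large: %llu mallocs, %llu dallocs, %llu live\n",
        (unsigned long long)nmalloc_large,
        (unsigned long long)ndalloc_large,
        (unsigned long long)(nmalloc_large - ndalloc_large));
    return 0;
}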
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_structs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_structs.h
deleted file mode 100644
index e2a5a408..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_structs.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
-
-#include "jemalloc/internal/arena_stats.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/counter.h"
-#include "jemalloc/internal/ecache.h"
-#include "jemalloc/internal/edata_cache.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/pa.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/ticker.h"
-
-struct arena_s {
- /*
- * Number of threads currently assigned to this arena. Each thread has
- * two distinct assignments, one for application-serving allocation, and
- * the other for internal metadata allocation. Internal metadata must
- * not be allocated from arenas explicitly created via the arenas.create
- * mallctl, because the arena.<i>.reset mallctl indiscriminately
- * discards all allocations for the affected arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
- *
- * Synchronization: atomic.
- */
- atomic_u_t nthreads[2];
-
- /* Next bin shard for binding new threads. Synchronization: atomic. */
- atomic_u_t binshard_next;
-
- /*
- * When percpu_arena is enabled, to amortize the cost of reading /
- * updating the current CPU id, track the most recent thread accessing
- * this arena, and only read CPU if there is a mismatch.
- */
- tsdn_t *last_thd;
-
- /* Synchronization: internal. */
- arena_stats_t stats;
-
- /*
- * Lists of tcaches and cache_bin_array_descriptors for extant threads
- * associated with this arena. Stats from these are merged
- * incrementally, and at exit if opt_stats_print is enabled.
- *
- * Synchronization: tcache_ql_mtx.
- */
- ql_head(tcache_slow_t) tcache_ql;
- ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
- malloc_mutex_t tcache_ql_mtx;
-
- /*
- * Represents a dss_prec_t, but atomically.
- *
- * Synchronization: atomic.
- */
- atomic_u_t dss_prec;
-
- /*
- * Extant large allocations.
- *
- * Synchronization: large_mtx.
- */
- edata_list_active_t large;
- /* Synchronizes all large allocation/update/deallocation. */
- malloc_mutex_t large_mtx;
-
- /* The page-level allocator shard this arena uses. */
- pa_shard_t pa_shard;
-
- /*
- * A cached copy of base->ind. This can get accessed on hot paths;
- * looking it up in base requires an extra pointer hop / cache miss.
- */
- unsigned ind;
-
- /*
- * Base allocator, from which arena metadata are allocated.
- *
- * Synchronization: internal.
- */
- base_t *base;
- /* Used to determine uptime. Read-only after initialization. */
- nstime_t create_time;
-
- /*
- * The arena is allocated alongside its bins; really this is a
- * dynamically sized array determined by the binshard settings.
- */
- bin_t bins[0];
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
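The trailing bin_t bins[0] member and its comment describe the classic "header plus variable-length tail in one allocation" layout. A small sketch of the same pattern using a standard C99 flexible array member; the types here are stand-ins, not jemalloc's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned nregs;
} bin_t;

typedef struct {
    unsigned ind;
    unsigned nbins;
    bin_t bins[];   /* header and bin array live in one allocation */
} arena_t;

static arena_t *
arena_new(unsigned ind, unsigned nbins) {
    arena_t *arena = malloc(sizeof(arena_t) + nbins * sizeof(bin_t));
    if (arena == NULL) {
        return NULL;
    }
    arena->ind = ind;
    arena->nbins = nbins;
    memset(arena->bins, 0, nbins * sizeof(bin_t));
    return arena;
}

int
main(void) {
    arena_t *arena = arena_new(0, 36);
    if (arena == NULL) {
        return 1;
    }
    printf("arena %u carries %u bins in the same allocation\n",
        arena->ind, arena->nbins);
    free(arena);
    return 0;
}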
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_types.h
deleted file mode 100644
index d0e12917..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/arena_types.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
-#define JEMALLOC_INTERNAL_ARENA_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-/* Default decay times in milliseconds. */
-#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
-#define MUZZY_DECAY_MS_DEFAULT (0)
-/* Number of event ticks between time checks. */
-#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
-
-typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_s arena_t;
-
-typedef enum {
- percpu_arena_mode_names_base = 0, /* Used for options processing. */
-
- /*
- * *_uninit are used only during bootstrapping, and must correspond
- * to initialized variant plus percpu_arena_mode_enabled_base.
- */
- percpu_arena_uninit = 0,
- per_phycpu_arena_uninit = 1,
-
- /* All non-disabled modes must come after percpu_arena_disabled. */
- percpu_arena_disabled = 2,
-
- percpu_arena_mode_names_limit = 3, /* Used for options processing. */
- percpu_arena_mode_enabled_base = 3,
-
- percpu_arena = 3,
- per_phycpu_arena = 4 /* Hyper threads share arena. */
-} percpu_arena_mode_t;
-
-#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
-#define PERCPU_ARENA_DEFAULT percpu_arena_disabled
-
-/*
- * When allocation_size >= oversize_threshold, use the dedicated huge arena
- * (unless an arena index has been explicitly specified). 0 disables the feature.
- */
-#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
-
-struct arena_config_s {
- /* extent hooks to be used for the arena */
- extent_hooks_t *extent_hooks;
-
- /*
- * Use extent hooks for metadata (base) allocations when true.
- */
- bool metadata_use_hooks;
-};
-
-typedef struct arena_config_s arena_config_t;
-
-extern const arena_config_t arena_config_default;
-
-#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
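The oversize_threshold comment above describes the routing rule: once a request reaches the threshold (8 MiB by default) and the caller has not pinned an arena, it goes to the dedicated huge arena. A toy sketch of that decision; the function name and the exact behavior are illustrative, not jemalloc's implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors OVERSIZE_THRESHOLD_DEFAULT: 8 MiB. */
#define OVERSIZE_THRESHOLD ((size_t)8 << 20)

static const char *
choose_arena(size_t size, bool explicit_arena) {
    if (!explicit_arena && size >= OVERSIZE_THRESHOLD) {
        return "dedicated huge arena";
    }
    return "regular (auto) arena";
}

int
main(void) {
    printf("64 KiB         -> %s\n", choose_arena((size_t)64 << 10, false));
    printf("16 MiB         -> %s\n", choose_arena((size_t)16 << 20, false));
    printf("16 MiB, pinned -> %s\n", choose_arena((size_t)16 << 20, true));
    return 0;
}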
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/assert.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/assert.h
deleted file mode 100644
index be4d45b3..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/assert.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/util.h"
-
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define assert(e) do { \
- if (unlikely(config_debug && !(e))) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef not_reached
-#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) { \
- not_implemented(); \
- } \
-} while (0)
-#endif
-
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#ifndef cassert
-#define cassert(c) do { \
- if (unlikely(!(c))) { \
- not_reached(); \
- } \
-} while (0)
-#endif
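These macros all hinge on config_debug being a compile-time constant, so the compiler drops the checks entirely in release builds. A stand-alone sketch of the same gating idea; my_assert and the local config_debug flag are stand-ins for jemalloc's assert() and configuration flag:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static const bool config_debug = true;

#define my_assert(e) do {                                       \
    if (config_debug && !(e)) {                                 \
        printf("%s:%d: Failed assertion: \"%s\"\n",             \
            __FILE__, __LINE__, #e);                            \
        abort();                                                \
    }                                                           \
} while (0)

int
main(void) {
    int nfree = 3;
    my_assert(nfree >= 0);   /* passes */
    my_assert(nfree < 100);  /* passes */
    printf("all assertions passed\n");
    return 0;
}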
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic.h
deleted file mode 100644
index c0f73122..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_H
-#define JEMALLOC_INTERNAL_ATOMIC_H
-
-#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
-
-#define JEMALLOC_U8_ATOMICS
-#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
-# include "jemalloc/internal/atomic_gcc_atomic.h"
-# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
-# undef JEMALLOC_U8_ATOMICS
-# endif
-#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
-# include "jemalloc/internal/atomic_gcc_sync.h"
-# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
-# undef JEMALLOC_U8_ATOMICS
-# endif
-#elif defined(_MSC_VER)
-# include "jemalloc/internal/atomic_msvc.h"
-#elif defined(JEMALLOC_C11_ATOMICS)
-# include "jemalloc/internal/atomic_c11.h"
-#else
-# error "Don't have atomics implemented on this platform."
-#endif
-
-/*
- * This header gives more or less a backport of C11 atomics. The user can write
- * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
- * counterparts of the C11 atomic functions for type, as so:
- * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
- * and then write things like:
- * int *some_ptr;
- * atomic_pi_t atomic_ptr_to_int;
- * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
- * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
- * assert(some_ptr == prev_value);
- * and expect things to work in the obvious way.
- *
- * Also included (with naming differences to avoid conflicts with the standard
- * library):
- * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
- * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
- */
-
-/*
- * Pure convenience, so that we don't have to type "atomic_memory_order_"
- * quite so often.
- */
-#define ATOMIC_RELAXED atomic_memory_order_relaxed
-#define ATOMIC_ACQUIRE atomic_memory_order_acquire
-#define ATOMIC_RELEASE atomic_memory_order_release
-#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
-#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
-
-/*
- * Another convenience -- simple atomic helper functions.
- */
-#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
- lg_size) \
- JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
- ATOMIC_INLINE void \
- atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
- type inc) { \
- type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
- type newval = oldval + inc; \
- atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
- } \
- ATOMIC_INLINE void \
- atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
- type inc) { \
- type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
- type newval = oldval - inc; \
- atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
- }
-
-/*
- * Not all platforms have 64-bit atomics. If we do, this #define exposes that
- * fact.
- */
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# define JEMALLOC_ATOMIC_U64
-#endif
-
-JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
-
-/*
- * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
- * platform that actually needs to know the size, MSVC.
- */
-JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
-
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
-
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
-
-#ifdef JEMALLOC_ATOMIC_U64
-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
-#endif
-
-#undef ATOMIC_INLINE
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
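The comment block above already shows the intended usage of the generated wrappers; the same round trip written directly against C11 <stdatomic.h> looks like this (on C11 builds the generated atomic_store_pi/atomic_exchange_pi calls reduce to these primitives, as atomic_c11.h below shows):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

int
main(void) {
    int value = 42;
    int *some_ptr = &value;
    _Atomic(int *) atomic_ptr_to_int;

    /* Publish the pointer, then atomically swap it out for NULL. */
    atomic_store_explicit(&atomic_ptr_to_int, some_ptr, memory_order_relaxed);
    int *prev_value = atomic_exchange_explicit(&atomic_ptr_to_int, NULL,
        memory_order_acq_rel);
    assert(some_ptr == prev_value);
    printf("exchanged %p for NULL\n", (void *)prev_value);
    return 0;
}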
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_c11.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_c11.h
deleted file mode 100644
index a5f9313a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_c11.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
-#define JEMALLOC_INTERNAL_ATOMIC_C11_H
-
-#include <stdatomic.h>
-
-#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
-
-#define atomic_memory_order_t memory_order
-#define atomic_memory_order_relaxed memory_order_relaxed
-#define atomic_memory_order_acquire memory_order_acquire
-#define atomic_memory_order_release memory_order_release
-#define atomic_memory_order_acq_rel memory_order_acq_rel
-#define atomic_memory_order_seq_cst memory_order_seq_cst
-
-#define atomic_fence atomic_thread_fence
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-typedef _Atomic(type) atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- /* \
- * A strict interpretation of the C standard prevents \
- * atomic_load from taking a const argument, but it's \
- * convenient for our purposes. This cast is a workaround. \
- */ \
- atomic_##short_type##_t* a_nonconst = \
- (atomic_##short_type##_t*)a; \
- return atomic_load_explicit(a_nonconst, mo); \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- atomic_store_explicit(a, val, mo); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return atomic_exchange_explicit(a, val, mo); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return atomic_compare_exchange_weak_explicit(a, expected, \
- desired, success_mo, failure_mo); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return atomic_compare_exchange_strong_explicit(a, expected, \
- desired, success_mo, failure_mo); \
-}
-
-/*
- * Integral types have some special operations available that non-integral ones
- * lack.
- */
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return atomic_fetch_add_explicit(a, val, mo); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return atomic_fetch_sub_explicit(a, val, mo); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return atomic_fetch_and_explicit(a, val, mo); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return atomic_fetch_or_explicit(a, val, mo); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return atomic_fetch_xor_explicit(a, val, mo); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_atomic.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_atomic.h
deleted file mode 100644
index 471515e8..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_atomic.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
-#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
-
-#include "jemalloc/internal/assert.h"
-
-#define ATOMIC_INIT(...) {__VA_ARGS__}
-
-typedef enum {
- atomic_memory_order_relaxed,
- atomic_memory_order_acquire,
- atomic_memory_order_release,
- atomic_memory_order_acq_rel,
- atomic_memory_order_seq_cst
-} atomic_memory_order_t;
-
-ATOMIC_INLINE int
-atomic_enum_to_builtin(atomic_memory_order_t mo) {
- switch (mo) {
- case atomic_memory_order_relaxed:
- return __ATOMIC_RELAXED;
- case atomic_memory_order_acquire:
- return __ATOMIC_ACQUIRE;
- case atomic_memory_order_release:
- return __ATOMIC_RELEASE;
- case atomic_memory_order_acq_rel:
- return __ATOMIC_ACQ_REL;
- case atomic_memory_order_seq_cst:
- return __ATOMIC_SEQ_CST;
- }
- /* Can't happen; the switch is exhaustive. */
- not_reached();
-}
-
-ATOMIC_INLINE void
-atomic_fence(atomic_memory_order_t mo) {
- __atomic_thread_fence(atomic_enum_to_builtin(mo));
-}
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-typedef struct { \
- type repr; \
-} atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- type result; \
- __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \
- return result; \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- type result; \
- __atomic_exchange(&a->repr, &val, &result, \
- atomic_enum_to_builtin(mo)); \
- return result; \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- UNUSED type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return __atomic_compare_exchange(&a->repr, expected, &desired, \
- true, atomic_enum_to_builtin(success_mo), \
- atomic_enum_to_builtin(failure_mo)); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- UNUSED type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- return __atomic_compare_exchange(&a->repr, expected, &desired, \
- false, \
- atomic_enum_to_builtin(success_mo), \
- atomic_enum_to_builtin(failure_mo)); \
-}
-
-
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_add(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_sub(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_and(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_or(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __atomic_fetch_xor(&a->repr, val, \
- atomic_enum_to_builtin(mo)); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
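
The GCC __atomic backend above hinges on translating the portable memory-order enum into the compiler's __ATOMIC_* constants before each builtin call. A standalone sketch of that mapping plus one fetch-add wrapper (GCC/Clang only; the demo_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef enum {
    demo_mo_relaxed,
    demo_mo_acquire,
    demo_mo_release,
    demo_mo_acq_rel,
    demo_mo_seq_cst
} demo_mo_t;

static inline int
demo_mo_to_builtin(demo_mo_t mo) {
    switch (mo) {
    case demo_mo_relaxed: return __ATOMIC_RELAXED;
    case demo_mo_acquire: return __ATOMIC_ACQUIRE;
    case demo_mo_release: return __ATOMIC_RELEASE;
    case demo_mo_acq_rel: return __ATOMIC_ACQ_REL;
    default:              return __ATOMIC_SEQ_CST;
    }
}

typedef struct {
    uint64_t repr;
} demo_atomic_u64_t;

static inline uint64_t
demo_fetch_add_u64(demo_atomic_u64_t *a, uint64_t val, demo_mo_t mo) {
    /* The __atomic builtins accept a runtime memory-order argument. */
    return __atomic_fetch_add(&a->repr, val, demo_mo_to_builtin(mo));
}

int
main(void) {
    demo_atomic_u64_t c = {0};
    uint64_t old = demo_fetch_add_u64(&c, 3, demo_mo_relaxed);
    printf("old=%llu new=%llu\n", (unsigned long long)old,
        (unsigned long long)__atomic_load_n(&c.repr, __ATOMIC_ACQUIRE));
    return 0;
}
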
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_sync.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_sync.h
deleted file mode 100644
index e02b7cbe..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_gcc_sync.h
+++ /dev/null
@@ -1,195 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
-#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
-
-#define ATOMIC_INIT(...) {__VA_ARGS__}
-
-typedef enum {
- atomic_memory_order_relaxed,
- atomic_memory_order_acquire,
- atomic_memory_order_release,
- atomic_memory_order_acq_rel,
- atomic_memory_order_seq_cst
-} atomic_memory_order_t;
-
-ATOMIC_INLINE void
-atomic_fence(atomic_memory_order_t mo) {
- /* Easy cases first: no barrier, and full barrier. */
- if (mo == atomic_memory_order_relaxed) {
- asm volatile("" ::: "memory");
- return;
- }
- if (mo == atomic_memory_order_seq_cst) {
- asm volatile("" ::: "memory");
- __sync_synchronize();
- asm volatile("" ::: "memory");
- return;
- }
- asm volatile("" ::: "memory");
-# if defined(__i386__) || defined(__x86_64__)
- /* This is implicit on x86. */
-# elif defined(__ppc64__)
- asm volatile("lwsync");
-# elif defined(__ppc__)
- asm volatile("sync");
-# elif defined(__sparc__) && defined(__arch64__)
- if (mo == atomic_memory_order_acquire) {
- asm volatile("membar #LoadLoad | #LoadStore");
- } else if (mo == atomic_memory_order_release) {
- asm volatile("membar #LoadStore | #StoreStore");
- } else {
- asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
- }
-# else
- __sync_synchronize();
-# endif
- asm volatile("" ::: "memory");
-}
-
-/*
- * A correct implementation of seq_cst loads and stores on weakly ordered
- * architectures could do either of the following:
- * 1. store() is weak-fence -> store -> strong fence, load() is load ->
- * strong-fence.
- * 2. store() is strong-fence -> store, load() is strong-fence -> load ->
- * weak-fence.
- * The tricky thing is, load() and store() above can be the load or store
- * portions of a gcc __sync builtin, so we have to follow GCC's lead, which
- * means going with strategy 2.
- * On strongly ordered architectures, the natural strategy is to stick a strong
- * fence after seq_cst stores, and have naked loads. So we want the strong
- * fences in different places on different architectures.
- * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
- * accomplish this.
- */
-
-ATOMIC_INLINE void
-atomic_pre_sc_load_fence() {
-# if defined(__i386__) || defined(__x86_64__) || \
- (defined(__sparc__) && defined(__arch64__))
- atomic_fence(atomic_memory_order_relaxed);
-# else
- atomic_fence(atomic_memory_order_seq_cst);
-# endif
-}
-
-ATOMIC_INLINE void
-atomic_post_sc_store_fence() {
-# if defined(__i386__) || defined(__x86_64__) || \
- (defined(__sparc__) && defined(__arch64__))
- atomic_fence(atomic_memory_order_seq_cst);
-# else
- atomic_fence(atomic_memory_order_relaxed);
-# endif
-
-}
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-typedef struct { \
- type volatile repr; \
-} atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- if (mo == atomic_memory_order_seq_cst) { \
- atomic_pre_sc_load_fence(); \
- } \
- type result = a->repr; \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_acquire); \
- } \
- return result; \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_release); \
- } \
- a->repr = val; \
- if (mo == atomic_memory_order_seq_cst) { \
- atomic_post_sc_store_fence(); \
- } \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- /* \
- * Because of FreeBSD, we care about gcc 4.2, which doesn't have\
- * an atomic exchange builtin. We fake it with a CAS loop. \
- */ \
- while (true) { \
- type old = a->repr; \
- if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
- return old; \
- } \
- } \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
- desired); \
- if (prev == *expected) { \
- return true; \
- } else { \
- *expected = prev; \
- return false; \
- } \
-} \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, \
- atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
- desired); \
- if (prev == *expected) { \
- return true; \
- } else { \
- *expected = prev; \
- return false; \
- } \
-}
-
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
- /* unused */ lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __sync_fetch_and_add(&a->repr, val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __sync_fetch_and_sub(&a->repr, val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __sync_fetch_and_and(&a->repr, val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __sync_fetch_and_or(&a->repr, val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return __sync_fetch_and_xor(&a->repr, val); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
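
The legacy __sync backend above has no exchange builtin, so atomic_exchange_* is faked with a compare-and-swap loop, as the inline comment notes. A standalone sketch of that trick (GCC/Clang __sync builtins; demo_sync_exchange is an illustrative name):

#include <stdio.h>

static unsigned long
demo_sync_exchange(unsigned long *p, unsigned long val) {
    while (1) {
        unsigned long old = *p;
        /* Retry until the CAS installs val over the value we just read. */
        if (__sync_bool_compare_and_swap(p, old, val)) {
            return old;
        }
    }
}

int
main(void) {
    unsigned long x = 10;
    unsigned long prev = demo_sync_exchange(&x, 42);
    printf("prev=%lu now=%lu\n", prev, x);
    return 0;
}
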
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_msvc.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_msvc.h
deleted file mode 100644
index 67057ce5..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/atomic_msvc.h
+++ /dev/null
@@ -1,158 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
-#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
-
-#define ATOMIC_INIT(...) {__VA_ARGS__}
-
-typedef enum {
- atomic_memory_order_relaxed,
- atomic_memory_order_acquire,
- atomic_memory_order_release,
- atomic_memory_order_acq_rel,
- atomic_memory_order_seq_cst
-} atomic_memory_order_t;
-
-typedef char atomic_repr_0_t;
-typedef short atomic_repr_1_t;
-typedef long atomic_repr_2_t;
-typedef __int64 atomic_repr_3_t;
-
-ATOMIC_INLINE void
-atomic_fence(atomic_memory_order_t mo) {
- _ReadWriteBarrier();
-# if defined(_M_ARM) || defined(_M_ARM64)
- /* ARM needs a barrier for everything but relaxed. */
- if (mo != atomic_memory_order_relaxed) {
- MemoryBarrier();
- }
-# elif defined(_M_IX86) || defined (_M_X64)
- /* x86 needs a barrier only for seq_cst. */
- if (mo == atomic_memory_order_seq_cst) {
- MemoryBarrier();
- }
-# else
-# error "Don't know how to create atomics for this platform for MSVC."
-# endif
- _ReadWriteBarrier();
-}
-
-#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
-
-#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
-#define ATOMIC_RAW_CONCAT(a, b) a ## b
-
-#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
- base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
-
-#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
- ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
-
-#define ATOMIC_INTERLOCKED_SUFFIX_0 8
-#define ATOMIC_INTERLOCKED_SUFFIX_1 16
-#define ATOMIC_INTERLOCKED_SUFFIX_2
-#define ATOMIC_INTERLOCKED_SUFFIX_3 64
-
-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
-typedef struct { \
- ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
-} atomic_##short_type##_t; \
- \
-ATOMIC_INLINE type \
-atomic_load_##short_type(const atomic_##short_type##_t *a, \
- atomic_memory_order_t mo) { \
- ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_acquire); \
- } \
- return (type) ret; \
-} \
- \
-ATOMIC_INLINE void \
-atomic_store_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- if (mo != atomic_memory_order_relaxed) { \
- atomic_fence(atomic_memory_order_release); \
- } \
- a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
- if (mo == atomic_memory_order_seq_cst) { \
- atomic_fence(atomic_memory_order_seq_cst); \
- } \
-} \
- \
-ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
- atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
- lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- ATOMIC_INTERLOCKED_REPR(lg_size) e = \
- (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
- ATOMIC_INTERLOCKED_REPR(lg_size) d = \
- (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
- ATOMIC_INTERLOCKED_REPR(lg_size) old = \
- ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
- lg_size)(&a->repr, d, e); \
- if (old == e) { \
- return true; \
- } else { \
- *expected = (type)old; \
- return false; \
- } \
-} \
- \
-ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
- type *expected, type desired, atomic_memory_order_t success_mo, \
- atomic_memory_order_t failure_mo) { \
- /* We implement the weak version with strong semantics. */ \
- return atomic_compare_exchange_weak_##short_type(a, expected, \
- desired, success_mo, failure_mo); \
-}
-
-
-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
-JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
- \
-ATOMIC_INLINE type \
-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
- lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
- \
-ATOMIC_INLINE type \
-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- /* \
- * MSVC warns on negation of unsigned operands, but for us it \
- * gives exactly the right semantics (MAX_TYPE + 1 - operand). \
- */ \
- __pragma(warning(push)) \
- __pragma(warning(disable: 4146)) \
- return atomic_fetch_add_##short_type(a, -val, mo); \
- __pragma(warning(pop)) \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-} \
-ATOMIC_INLINE type \
-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
- type val, atomic_memory_order_t mo) { \
- return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
- &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
-}
-
-#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
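
One detail worth noting in the MSVC shim above is that atomic_fetch_sub is expressed as a fetch-add of the negated unsigned operand, relying on modular wraparound (MAX_TYPE + 1 - operand). The sketch below shows the same idea portably on C11 atomics rather than the Interlocked intrinsics; the demo_* names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
demo_fetch_sub_u32(_Atomic uint32_t *a, uint32_t val) {
    /* Unsigned negation wraps modulo 2^32, so adding it subtracts val. */
    return atomic_fetch_add_explicit(a, (uint32_t)(0u - val),
        memory_order_relaxed);
}

int
main(void) {
    _Atomic uint32_t c = 100;
    uint32_t old = demo_fetch_sub_u32(&c, 7);
    printf("old=%u new=%u\n", (unsigned)old,
        (unsigned)atomic_load_explicit(&c, memory_order_relaxed));
    return 0;
}
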
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_externs.h
deleted file mode 100644
index 6ae3c8d8..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_externs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
-
-extern bool opt_background_thread;
-extern size_t opt_max_background_threads;
-extern malloc_mutex_t background_thread_lock;
-extern atomic_b_t background_thread_enabled_state;
-extern size_t n_background_threads;
-extern size_t max_background_threads;
-extern background_thread_info_t *background_thread_info;
-
-bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
-bool background_threads_enable(tsd_t *tsd);
-bool background_threads_disable(tsd_t *tsd);
-bool background_thread_is_started(background_thread_info_t* info);
-void background_thread_wakeup_early(background_thread_info_t *info,
- nstime_t *remaining_sleep);
-void background_thread_prefork0(tsdn_t *tsdn);
-void background_thread_prefork1(tsdn_t *tsdn);
-void background_thread_postfork_parent(tsdn_t *tsdn);
-void background_thread_postfork_child(tsdn_t *tsdn);
-bool background_thread_stats_read(tsdn_t *tsdn,
- background_thread_stats_t *stats);
-void background_thread_ctl_init(tsdn_t *tsdn);
-
-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
-extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-#endif
-bool background_thread_boot0(void);
-bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_inlines.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_inlines.h
deleted file mode 100644
index 92c5febe..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_inlines.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
-
-JEMALLOC_ALWAYS_INLINE bool
-background_thread_enabled(void) {
- return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-background_thread_enabled_set(tsdn_t *tsdn, bool state) {
- malloc_mutex_assert_owner(tsdn, &background_thread_lock);
- atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
-}
-
-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
-arena_background_thread_info_get(arena_t *arena) {
- unsigned arena_ind = arena_ind_get(arena);
- return &background_thread_info[arena_ind % max_background_threads];
-}
-
-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
-background_thread_info_get(size_t ind) {
- return &background_thread_info[ind % max_background_threads];
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-background_thread_wakeup_time_get(background_thread_info_t *info) {
- uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
- assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
- (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
- return next_wakeup;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
- uint64_t wakeup_time) {
- malloc_mutex_assert_owner(tsdn, &info->mtx);
- atomic_store_b(&info->indefinite_sleep,
- wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
- nstime_init(&info->next_wakeup, wakeup_time);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-background_thread_indefinite_sleep(background_thread_info_t *info) {
- return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
-}
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
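
arena_background_thread_info_get above maps an arena to one of a fixed pool of background threads with a simple modulo, so many arenas can share a small number of threads. A trivial standalone sketch of that sharding (the pool size and names here are illustrative):

#include <stdio.h>

#define DEMO_MAX_BG_THREADS 4

static unsigned
demo_bg_thread_for_arena(unsigned arena_ind) {
    return arena_ind % DEMO_MAX_BG_THREADS;
}

int
main(void) {
    for (unsigned ind = 0; ind < 10; ind++) {
        printf("arena %u -> background thread %u\n", ind,
            demo_bg_thread_for_arena(ind));
    }
    return 0;
}
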
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_structs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_structs.h
deleted file mode 100644
index 83a91984..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/background_thread_structs.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
-
-/* This file really combines "structs" and "types", but only transitionally. */
-
-#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
-# define JEMALLOC_PTHREAD_CREATE_WRAPPER
-#endif
-
-#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
-#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
-#define DEFAULT_NUM_BACKGROUND_THREAD 4
-
-/*
- * These exist only as a transitional state. Eventually, deferral should be
- * part of the PAI, and each implementation can indicate wait times with more
- * specificity.
- */
-#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
-#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
-
-#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
-#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
-
-typedef enum {
- background_thread_stopped,
- background_thread_started,
- /* Thread waits on the global lock when paused (for arena_reset). */
- background_thread_paused,
-} background_thread_state_t;
-
-struct background_thread_info_s {
-#ifdef JEMALLOC_BACKGROUND_THREAD
- /* Background thread is pthread specific. */
- pthread_t thread;
- pthread_cond_t cond;
-#endif
- malloc_mutex_t mtx;
- background_thread_state_t state;
- /* When true, it means no wakeup scheduled. */
- atomic_b_t indefinite_sleep;
- /* Next scheduled wakeup time (absolute time in ns). */
- nstime_t next_wakeup;
- /*
- * Since the last background thread run, newly added number of pages
- * that need to be purged by the next wakeup. This is adjusted on
- * epoch advance, and is used to determine whether we should signal the
- * background thread to wake up earlier.
- */
- size_t npages_to_purge_new;
- /* Stats: total number of runs since started. */
- uint64_t tot_n_runs;
- /* Stats: total sleep time since started. */
- nstime_t tot_sleep_time;
-};
-typedef struct background_thread_info_s background_thread_info_t;
-
-struct background_thread_stats_s {
- size_t num_threads;
- uint64_t num_runs;
- nstime_t run_interval;
- mutex_prof_data_t max_counter_per_bg_thd;
-};
-typedef struct background_thread_stats_s background_thread_stats_t;
-
-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/base.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/base.h
deleted file mode 100644
index 9b2c9fb1..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/base.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_H
-#define JEMALLOC_INTERNAL_BASE_H
-
-#include "jemalloc/internal/edata.h"
-#include "jemalloc/internal/ehooks.h"
-#include "jemalloc/internal/mutex.h"
-
-enum metadata_thp_mode_e {
- metadata_thp_disabled = 0,
- /*
- * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
- * + low usage arena (i.e. THP becomes a significant percentage), the
- * "auto" option only starts using THP after a base allocator used up
- * the first THP region. Starting from the second hugepage (in a single
- * arena), "auto" behaves the same as "always", i.e. madvise hugepage
- * right away.
- */
- metadata_thp_auto = 1,
- metadata_thp_always = 2,
- metadata_thp_mode_limit = 3
-};
-typedef enum metadata_thp_mode_e metadata_thp_mode_t;
-
-#define METADATA_THP_DEFAULT metadata_thp_disabled
-extern metadata_thp_mode_t opt_metadata_thp;
-extern const char *metadata_thp_mode_names[];
-
-
-/* Embedded at the beginning of every block of base-managed virtual memory. */
-typedef struct base_block_s base_block_t;
-struct base_block_s {
- /* Total size of block's virtual memory mapping. */
- size_t size;
-
- /* Next block in list of base's blocks. */
- base_block_t *next;
-
- /* Tracks unused trailing space. */
- edata_t edata;
-};
-
-typedef struct base_s base_t;
-struct base_s {
- /*
- * User-configurable extent hook functions.
- */
- ehooks_t ehooks;
-
- /*
- * User-configurable extent hook functions for metadata allocations.
- */
- ehooks_t ehooks_base;
-
- /* Protects base_alloc() and base_stats_get() operations. */
- malloc_mutex_t mtx;
-
- /* Using THP when true (metadata_thp auto mode). */
- bool auto_thp_switched;
- /*
- * Most recent size class in the series of increasingly large base
- * extents. Logarithmic spacing between subsequent allocations ensures
- * that the total number of distinct mappings remains small.
- */
- pszind_t pind_last;
-
- /* Serial number generation state. */
- size_t extent_sn_next;
-
- /* Chain of all blocks associated with base. */
- base_block_t *blocks;
-
- /* Heap of extents that track unused trailing space within blocks. */
- edata_heap_t avail[SC_NSIZES];
-
- /* Stats, only maintained if config_stats. */
- size_t allocated;
- size_t resident;
- size_t mapped;
- /* Number of THP regions touched. */
- size_t n_thp;
-};
-
-static inline unsigned
-base_ind_get(const base_t *base) {
- return ehooks_ind_get(&base->ehooks);
-}
-
-static inline bool
-metadata_thp_enabled(void) {
- return (opt_metadata_thp != metadata_thp_disabled);
-}
-
-base_t *b0get(void);
-base_t *base_new(tsdn_t *tsdn, unsigned ind,
- const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
-void base_delete(tsdn_t *tsdn, base_t *base);
-ehooks_t *base_ehooks_get(base_t *base);
-ehooks_t *base_ehooks_get_for_metadata(base_t *base);
-extent_hooks_t *base_extent_hooks_set(base_t *base,
- extent_hooks_t *extent_hooks);
-void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
-void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
- size_t *resident, size_t *mapped, size_t *n_thp);
-void base_prefork(tsdn_t *tsdn, base_t *base);
-void base_postfork_parent(tsdn_t *tsdn, base_t *base);
-void base_postfork_child(tsdn_t *tsdn, base_t *base);
-bool base_boot(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_BASE_H */
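
As the comment on pind_last above says, base blocks are carved out in increasingly large size classes so that the total number of distinct mappings stays small. A rough standalone illustration of why geometric growth keeps the mapping count logarithmic in the total metadata size (the 4 KiB starting size and doubling factor are assumptions for the demo, not jemalloc's actual size-class series):

#include <stdio.h>

int
main(void) {
    size_t block = 4096;    /* assumed first block size */
    size_t total = 0;
    unsigned nblocks = 0;
    while (total < ((size_t)1 << 30)) {   /* grow until ~1 GiB of metadata */
        total += block;
        block *= 2;                       /* geometric growth */
        nblocks++;
    }
    printf("covered %zu bytes with %u mappings\n", total, nblocks);
    return 0;
}
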
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin.h
deleted file mode 100644
index 63f97395..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BIN_H
-#define JEMALLOC_INTERNAL_BIN_H
-
-#include "jemalloc/internal/bin_stats.h"
-#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/edata.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/sc.h"
-
-/*
- * A bin contains a set of extents that are currently being used for slab
- * allocations.
- */
-typedef struct bin_s bin_t;
-struct bin_s {
- /* All operations on bin_t fields require lock ownership. */
- malloc_mutex_t lock;
-
- /*
- * Bin statistics. These get touched every time the lock is acquired,
- * so put them close by in the hopes of getting some cache locality.
- */
- bin_stats_t stats;
-
- /*
- * Current slab being used to service allocations of this bin's size
- * class. slabcur is independent of slabs_{nonfull,full}; whenever
- * slabcur is reassigned, the previous slab must be deallocated or
- * inserted into slabs_{nonfull,full}.
- */
- edata_t *slabcur;
-
- /*
- * Heap of non-full slabs. This heap is used to assure that new
- * allocations come from the non-full slab that is oldest/lowest in
- * memory.
- */
- edata_heap_t slabs_nonfull;
-
- /* List used to track full slabs. */
- edata_list_active_t slabs_full;
-};
-
-/* A set of sharded bins of the same size class. */
-typedef struct bins_s bins_t;
-struct bins_s {
- /* Sharded bins. Dynamically sized. */
- bin_t *bin_shards;
-};
-
-void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
-bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
- size_t end_size, size_t nshards);
-
-/* Initializes a bin to empty. Returns true on error. */
-bool bin_init(bin_t *bin);
-
-/* Forking. */
-void bin_prefork(tsdn_t *tsdn, bin_t *bin);
-void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
-void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
-
-/* Stats. */
-static inline void
-bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
- malloc_mutex_lock(tsdn, &bin->lock);
- malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
- bin_stats_t *stats = &dst_bin_stats->stats_data;
- stats->nmalloc += bin->stats.nmalloc;
- stats->ndalloc += bin->stats.ndalloc;
- stats->nrequests += bin->stats.nrequests;
- stats->curregs += bin->stats.curregs;
- stats->nfills += bin->stats.nfills;
- stats->nflushes += bin->stats.nflushes;
- stats->nslabs += bin->stats.nslabs;
- stats->reslabs += bin->stats.reslabs;
- stats->curslabs += bin->stats.curslabs;
- stats->nonfull_slabs += bin->stats.nonfull_slabs;
- malloc_mutex_unlock(tsdn, &bin->lock);
-}
-
-#endif /* JEMALLOC_INTERNAL_BIN_H */
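
bin_stats_merge above is a plain lock-accumulate-unlock pattern: take the bin's mutex, add the live counters into the caller's snapshot, and release. A self-contained pthread sketch of the same pattern with just two counters (demo_* names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    uint64_t nmalloc;
    uint64_t ndalloc;
} demo_bin_t;

typedef struct {
    uint64_t nmalloc;
    uint64_t ndalloc;
} demo_bin_stats_t;

static void
demo_bin_stats_merge(demo_bin_stats_t *dst, demo_bin_t *bin) {
    pthread_mutex_lock(&bin->lock);
    dst->nmalloc += bin->nmalloc;     /* accumulate under the bin lock */
    dst->ndalloc += bin->ndalloc;
    pthread_mutex_unlock(&bin->lock);
}

int
main(void) {
    demo_bin_t bin = {PTHREAD_MUTEX_INITIALIZER, 10, 4};
    demo_bin_stats_t snap = {0, 0};
    demo_bin_stats_merge(&snap, &bin);
    printf("nmalloc=%llu ndalloc=%llu\n",
        (unsigned long long)snap.nmalloc, (unsigned long long)snap.ndalloc);
    return 0;
}
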
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_info.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_info.h
deleted file mode 100644
index 7fe65c86..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_info.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
-#define JEMALLOC_INTERNAL_BIN_INFO_H
-
-#include "jemalloc/internal/bitmap.h"
-
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- * /--------------------\
- * | region 0 |
- * |--------------------|
- * | region 1 |
- * |--------------------|
- * | ... |
- * | ... |
- * | ... |
- * |--------------------|
- * | region nregs-1 |
- * \--------------------/
- */
-typedef struct bin_info_s bin_info_t;
-struct bin_info_s {
- /* Size of regions in a slab for this bin's size class. */
- size_t reg_size;
-
- /* Total size of a slab for this bin's size class. */
- size_t slab_size;
-
- /* Total number of regions in a slab for this bin's size class. */
- uint32_t nregs;
-
- /* Number of sharded bins in each arena for this size class. */
- uint32_t n_shards;
-
- /*
- * Metadata used to manipulate bitmaps for slabs associated with this
- * bin.
- */
- bitmap_info_t bitmap_info;
-};
-
-extern bin_info_t bin_infos[SC_NBINS];
-
-void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
-
-#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_stats.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_stats.h
deleted file mode 100644
index 0b99297c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_stats.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
-#define JEMALLOC_INTERNAL_BIN_STATS_H
-
-#include "jemalloc/internal/mutex_prof.h"
-
-typedef struct bin_stats_s bin_stats_t;
-struct bin_stats_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the bin. Note that tcache may allocate an object, then recycle it
- * many times, resulting in many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
- */
- uint64_t nmalloc;
- uint64_t ndalloc;
-
- /*
- * Number of allocation requests that correspond to the size of this
- * bin. This includes requests served by tcache, though tcache only
- * periodically merges into this counter.
- */
- uint64_t nrequests;
-
- /*
- * Current number of regions of this size class, including regions
- * currently cached by tcache.
- */
- size_t curregs;
-
- /* Number of tcache fills from this bin. */
- uint64_t nfills;
-
- /* Number of tcache flushes to this bin. */
- uint64_t nflushes;
-
- /* Total number of slabs created for this bin's size class. */
- uint64_t nslabs;
-
- /*
- * Total number of slabs reused by extracting them from the slabs heap
- * for this bin's size class.
- */
- uint64_t reslabs;
-
- /* Current number of slabs in this bin. */
- size_t curslabs;
-
- /* Current size of nonfull slabs heap in this bin. */
- size_t nonfull_slabs;
-};
-
-typedef struct bin_stats_data_s bin_stats_data_t;
-struct bin_stats_data_s {
- bin_stats_t stats_data;
- mutex_prof_data_t mutex_data;
-};
-#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_types.h
deleted file mode 100644
index 945e8326..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bin_types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
-#define JEMALLOC_INTERNAL_BIN_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
-#define N_BIN_SHARDS_DEFAULT 1
-
-/* Used in TSD static initializer only. Real init in arena_bind(). */
-#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}
-
-typedef struct tsd_binshards_s tsd_binshards_t;
-struct tsd_binshards_s {
- uint8_t binshard[SC_NBINS];
-};
-
-#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bit_util.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bit_util.h
deleted file mode 100644
index bac59140..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bit_util.h
+++ /dev/null
@@ -1,422 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
-#define JEMALLOC_INTERNAL_BIT_UTIL_H
-
-#include "jemalloc/internal/assert.h"
-
-/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
- || !defined(JEMALLOC_INTERNAL_FFS)
-# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
-#endif
-
-/*
- * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
- * input, and returns the position of the lowest bit set (as opposed to the
- * posix versions, which return 1 larger than that position and use a return
- * value of zero as a sentinel). This tends to simplify logic in callers, and
- * allows for consistency with the builtins we build fls on top of.
- */
-static inline unsigned
-ffs_llu(unsigned long long x) {
- util_assume(x != 0);
- return JEMALLOC_INTERNAL_FFSLL(x) - 1;
-}
-
-static inline unsigned
-ffs_lu(unsigned long x) {
- util_assume(x != 0);
- return JEMALLOC_INTERNAL_FFSL(x) - 1;
-}
-
-static inline unsigned
-ffs_u(unsigned x) {
- util_assume(x != 0);
- return JEMALLOC_INTERNAL_FFS(x) - 1;
-}
-
-#define DO_FLS_SLOW(x, suffix) do { \
- util_assume(x != 0); \
- x |= (x >> 1); \
- x |= (x >> 2); \
- x |= (x >> 4); \
- x |= (x >> 8); \
- x |= (x >> 16); \
- if (sizeof(x) > 4) { \
- /* \
- * If sizeof(x) is 4, then the expression "x >> 32" \
- * will generate compiler warnings even if the code \
- * never executes. This circumvents the warning, and \
- * gets compiled out in optimized builds. \
- */ \
- int constant_32 = sizeof(x) * 4; \
- x |= (x >> constant_32); \
- } \
- x++; \
- if (x == 0) { \
- return 8 * sizeof(x) - 1; \
- } \
- return ffs_##suffix(x) - 1; \
-} while(0)
-
-static inline unsigned
-fls_llu_slow(unsigned long long x) {
- DO_FLS_SLOW(x, llu);
-}
-
-static inline unsigned
-fls_lu_slow(unsigned long x) {
- DO_FLS_SLOW(x, lu);
-}
-
-static inline unsigned
-fls_u_slow(unsigned x) {
- DO_FLS_SLOW(x, u);
-}
-
-#undef DO_FLS_SLOW
-
-#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
-static inline unsigned
-fls_llu(unsigned long long x) {
- util_assume(x != 0);
- /*
- * Note that the xor here is more naturally written as subtraction; the
- * last bit set is the number of bits in the type minus the number of
- * leading zero bits. But GCC implements that as:
- * bsr edi, edi
- * mov eax, 31
- * xor edi, 31
- * sub eax, edi
- * If we write it as xor instead, then we get
- * bsr eax, edi
- * as desired.
- */
- return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
-}
-
-static inline unsigned
-fls_lu(unsigned long x) {
- util_assume(x != 0);
- return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
-}
-
-static inline unsigned
-fls_u(unsigned x) {
- util_assume(x != 0);
- return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
-}
-#elif defined(_MSC_VER)
-
-#if LG_SIZEOF_PTR == 3
-#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
-#else
-/*
- * This never actually runs; we're just dodging a compiler error for the
- * never-taken branch where sizeof(void *) == 8.
- */
-#define DO_BSR64(bit, x) bit = 0; unreachable()
-#endif
-
-#define DO_FLS(x) do { \
- if (x == 0) { \
- return 8 * sizeof(x); \
- } \
- unsigned long bit; \
- if (sizeof(x) == 4) { \
- _BitScanReverse(&bit, (unsigned)x); \
- return (unsigned)bit; \
- } \
- if (sizeof(x) == 8 && sizeof(void *) == 8) { \
- DO_BSR64(bit, x); \
- return (unsigned)bit; \
- } \
- if (sizeof(x) == 8 && sizeof(void *) == 4) { \
- /* Dodge a compiler warning, as above. */ \
- int constant_32 = sizeof(x) * 4; \
- if (_BitScanReverse(&bit, \
- (unsigned)(x >> constant_32))) { \
- return 32 + (unsigned)bit; \
- } else { \
- _BitScanReverse(&bit, (unsigned)x); \
- return (unsigned)bit; \
- } \
- } \
- unreachable(); \
-} while (0)
-
-static inline unsigned
-fls_llu(unsigned long long x) {
- DO_FLS(x);
-}
-
-static inline unsigned
-fls_lu(unsigned long x) {
- DO_FLS(x);
-}
-
-static inline unsigned
-fls_u(unsigned x) {
- DO_FLS(x);
-}
-
-#undef DO_FLS
-#undef DO_BSR64
-#else
-
-static inline unsigned
-fls_llu(unsigned long long x) {
- return fls_llu_slow(x);
-}
-
-static inline unsigned
-fls_lu(unsigned long x) {
- return fls_lu_slow(x);
-}
-
-static inline unsigned
-fls_u(unsigned x) {
- return fls_u_slow(x);
-}
-#endif
-
-#if LG_SIZEOF_LONG_LONG > 3
-# error "Haven't implemented popcount for 16-byte ints."
-#endif
-
-#define DO_POPCOUNT(x, type) do { \
- /* \
- * Algorithm from an old AMD optimization reference manual. \
- * We're putting a little bit more work than you might expect \
- * into the no-intrinsic case, since we only support the \
- * GCC intrinsics spelling of popcount (for now). Detecting \
- * whether or not the popcount builtin is actually usable in \
- * MSVC is nontrivial. \
- */ \
- \
- type bmul = (type)0x0101010101010101ULL; \
- \
- /* \
- * Replace each 2 bits with the sideways sum of the original \
- * values. 0x5 = 0b0101. \
- * \
- * You might expect this to be: \
- * x = (x & 0x55...) + ((x >> 1) & 0x55...). \
- * That costs an extra mask relative to this, though. \
- */ \
- x = x - ((x >> 1) & (0x55U * bmul)); \
- /* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
- x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
- /* \
- * Replace each 8 bits with their sideways sum. Note that we \
- * can't overflow within each 4-bit sum here, so we can skip \
- * the initial mask. \
- */ \
- x = (x + (x >> 4)) & (bmul * 0x0FU); \
- /* \
- * None of the partial sums in this multiplication (viewed in \
- * base-256) can overflow into the next digit. So the least \
- * significant byte of the product will be the least \
- * significant byte of the original value, the second least \
- * significant byte will be the sum of the two least \
- * significant bytes of the original value, and so on. \
- * Importantly, the high byte will be the byte-wise sum of all \
- * the bytes of the original value. \
- */ \
- x = x * bmul; \
- x >>= ((sizeof(x) - 1) * 8); \
- return (unsigned)x; \
-} while(0)
-
-static inline unsigned
-popcount_u_slow(unsigned bitmap) {
- DO_POPCOUNT(bitmap, unsigned);
-}
-
-static inline unsigned
-popcount_lu_slow(unsigned long bitmap) {
- DO_POPCOUNT(bitmap, unsigned long);
-}
-
-static inline unsigned
-popcount_llu_slow(unsigned long long bitmap) {
- DO_POPCOUNT(bitmap, unsigned long long);
-}
-
-#undef DO_POPCOUNT
-
-static inline unsigned
-popcount_u(unsigned bitmap) {
-#ifdef JEMALLOC_INTERNAL_POPCOUNT
- return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
-#else
- return popcount_u_slow(bitmap);
-#endif
-}
-
-static inline unsigned
-popcount_lu(unsigned long bitmap) {
-#ifdef JEMALLOC_INTERNAL_POPCOUNTL
- return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
-#else
- return popcount_lu_slow(bitmap);
-#endif
-}
-
-static inline unsigned
-popcount_llu(unsigned long long bitmap) {
-#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
- return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
-#else
- return popcount_llu_slow(bitmap);
-#endif
-}
-
-/*
- * Clears the lowest set bit in *bitmap and returns its position.
- * *bitmap must not be 0.
- */
-
-static inline size_t
-cfs_lu(unsigned long* bitmap) {
- util_assume(*bitmap != 0);
- size_t bit = ffs_lu(*bitmap);
- *bitmap ^= ZU(1) << bit;
- return bit;
-}
-
-static inline unsigned
-ffs_zu(size_t x) {
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return ffs_u(x);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return ffs_lu(x);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return ffs_llu(x);
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-static inline unsigned
-fls_zu(size_t x) {
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return fls_u(x);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return fls_lu(x);
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return fls_llu(x);
-#else
-#error No implementation for size_t fls()
-#endif
-}
-
-
-static inline unsigned
-ffs_u64(uint64_t x) {
-#if LG_SIZEOF_LONG == 3
- return ffs_lu(x);
-#elif LG_SIZEOF_LONG_LONG == 3
- return ffs_llu(x);
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-static inline unsigned
-fls_u64(uint64_t x) {
-#if LG_SIZEOF_LONG == 3
- return fls_lu(x);
-#elif LG_SIZEOF_LONG_LONG == 3
- return fls_llu(x);
-#else
-#error No implementation for 64-bit fls()
-#endif
-}
-
-static inline unsigned
-ffs_u32(uint32_t x) {
-#if LG_SIZEOF_INT == 2
- return ffs_u(x);
-#else
-#error No implementation for 32-bit ffs()
-#endif
- return ffs_u(x);
-}
-
-static inline unsigned
-fls_u32(uint32_t x) {
-#if LG_SIZEOF_INT == 2
- return fls_u(x);
-#else
-#error No implementation for 32-bit fls()
-#endif
- return fls_u(x);
-}
-
-static inline uint64_t
-pow2_ceil_u64(uint64_t x) {
- if (unlikely(x <= 1)) {
- return x;
- }
- size_t msb_on_index = fls_u64(x - 1);
- /*
- * Range-check; it's on the callers to ensure that the result of this
- * call won't overflow.
- */
- assert(msb_on_index < 63);
- return 1ULL << (msb_on_index + 1);
-}
-
-static inline uint32_t
-pow2_ceil_u32(uint32_t x) {
- if (unlikely(x <= 1)) {
- return x;
- }
- size_t msb_on_index = fls_u32(x - 1);
- /* As above. */
- assert(msb_on_index < 31);
- return 1U << (msb_on_index + 1);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-static inline size_t
-pow2_ceil_zu(size_t x) {
-#if (LG_SIZEOF_PTR == 3)
- return pow2_ceil_u64(x);
-#else
- return pow2_ceil_u32(x);
-#endif
-}
-
-static inline unsigned
-lg_floor(size_t x) {
- util_assume(x != 0);
-#if (LG_SIZEOF_PTR == 3)
- return fls_u64(x);
-#else
- return fls_u32(x);
-#endif
-}
-
-static inline unsigned
-lg_ceil(size_t x) {
- return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
-}
-
-/* A compile-time version of lg_floor and lg_ceil. */
-#define LG_FLOOR_1(x) 0
-#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
-#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
-#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
-#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
-#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
-#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
-#if LG_SIZEOF_PTR == 2
-# define LG_FLOOR(x) LG_FLOOR_32((x))
-#else
-# define LG_FLOOR(x) LG_FLOOR_64((x))
-#endif
-
-#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
-
-#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
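
The comment inside fls_llu above explains why the result is written as an xor against the type width minus one rather than a subtraction: for nonzero x it still yields the index of the highest set bit, but lets GCC emit a single bsr. A standalone sketch of that trick for plain unsigned (GCC/Clang only; demo_fls_u is an illustrative name):

#include <assert.h>
#include <stdio.h>

static unsigned
demo_fls_u(unsigned x) {
    assert(x != 0);
    /* (bits - 1) ^ clz(x) == index of the highest set bit. */
    return (unsigned)((8 * sizeof(x) - 1) ^ (unsigned)__builtin_clz(x));
}

int
main(void) {
    printf("fls(1)=%u fls(0x80)=%u fls(0x12345678)=%u\n",
        demo_fls_u(1), demo_fls_u(0x80), demo_fls_u(0x12345678));
    return 0;
}
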
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bitmap.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bitmap.h
deleted file mode 100644
index dc19454d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/bitmap.h
+++ /dev/null
@@ -1,368 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BITMAP_H
-#define JEMALLOC_INTERNAL_BITMAP_H
-
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/sc.h"
-
-typedef unsigned long bitmap_t;
-#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
-
-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
-/* Maximum bitmap bit count is determined by maximum regions per slab. */
-# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
-#else
-/* Maximum bitmap bit count is determined by number of extent size classes. */
-# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
-#endif
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
-
-/* Number of bits per group. */
-#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
-#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
-
-/*
- * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define BITMAP_USE_TREE
-#endif
-
-/* Number of groups required to store a given number of bits. */
-#define BITMAP_BITS2GROUPS(nbits) \
- (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
-
-/*
- * Number of groups required at a particular level for a given number of bits.
- */
-#define BITMAP_GROUPS_L0(nbits) \
- BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_L1(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
-#define BITMAP_GROUPS_L2(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
-#define BITMAP_GROUPS_L3(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
- BITMAP_BITS2GROUPS((nbits)))))
-#define BITMAP_GROUPS_L4(nbits) \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
- BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
-
-/*
- * Assuming the number of levels, number of groups required for a given number
- * of bits.
- */
-#define BITMAP_GROUPS_1_LEVEL(nbits) \
- BITMAP_GROUPS_L0(nbits)
-#define BITMAP_GROUPS_2_LEVEL(nbits) \
- (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
-#define BITMAP_GROUPS_3_LEVEL(nbits) \
- (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
-#define BITMAP_GROUPS_4_LEVEL(nbits) \
- (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
-#define BITMAP_GROUPS_5_LEVEL(nbits) \
- (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
-
-/*
- * Maximum number of groups required to support LG_BITMAP_MAXBITS.
- */
-#ifdef BITMAP_USE_TREE
-
-#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
-# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
-# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
-#else
-# error "Unsupported bitmap size"
-#endif
-
-/*
- * Maximum number of levels possible. This could be statically computed based
- * on LG_BITMAP_MAXBITS:
- *
- * #define BITMAP_MAX_LEVELS \
- * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
- * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
- *
- * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
- * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
- * various cascading macros. The only additional cost this incurs is some
- * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
- * are not impacted.
- */
-#define BITMAP_MAX_LEVELS 5
-
-#define BITMAP_INFO_INITIALIZER(nbits) { \
- /* nbits. */ \
- nbits, \
- /* nlevels. */ \
- (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
- (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
- (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
- (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
- /* levels. */ \
- { \
- {0}, \
- {BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
- BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
- BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
- {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
- BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
- + BITMAP_GROUPS_L0(nbits)} \
- } \
-}
-
-#else /* BITMAP_USE_TREE */
-
-#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#define BITMAP_INFO_INITIALIZER(nbits) { \
- /* nbits. */ \
- nbits, \
- /* ngroups. */ \
- BITMAP_BITS2GROUPS(nbits) \
-}
-
-#endif /* BITMAP_USE_TREE */
-
-typedef struct bitmap_level_s {
- /* Offset of this level's groups within the array of groups. */
- size_t group_offset;
-} bitmap_level_t;
-
-typedef struct bitmap_info_s {
- /* Logical number of bits in bitmap (stored at bottom level). */
- size_t nbits;
-
-#ifdef BITMAP_USE_TREE
- /* Number of levels necessary for nbits. */
- unsigned nlevels;
-
- /*
- * Only the first (nlevels+1) elements are used, and levels are ordered
- * bottom to top (e.g. the bottom level is stored in levels[0]).
- */
- bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* BITMAP_USE_TREE */
- /* Number of groups necessary for nbits. */
- size_t ngroups;
-#endif /* BITMAP_USE_TREE */
-} bitmap_info_t;
-
-void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
-size_t bitmap_size(const bitmap_info_t *binfo);
-
-static inline bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
-#ifdef BITMAP_USE_TREE
- size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
- bitmap_t rg = bitmap[rgoff];
- /* The bitmap is full iff the root group is 0. */
- return (rg == 0);
-#else
- size_t i;
-
- for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0) {
- return false;
- }
- }
- return true;
-#endif
-}
-
-static inline bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
- size_t goff;
- bitmap_t g;
-
- assert(bit < binfo->nbits);
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- g = bitmap[goff];
- return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-}
-
-static inline void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
- size_t goff;
- bitmap_t *gp;
- bitmap_t g;
-
- assert(bit < binfo->nbits);
- assert(!bitmap_get(bitmap, binfo, bit));
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[goff];
- g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- assert(bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
- /* Propagate group state transitions up the tree. */
- if (g == 0) {
- unsigned i;
- for (i = 1; i < binfo->nlevels; i++) {
- bit = goff;
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[binfo->levels[i].group_offset + goff];
- g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- if (g != 0) {
- break;
- }
- }
- }
-#endif
-}
-
-/* ffu: find first unset >= bit. */
-static inline size_t
-bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- assert(min_bit < binfo->nbits);
-
-#ifdef BITMAP_USE_TREE
- size_t bit = 0;
- for (unsigned level = binfo->nlevels; level--;) {
- size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
- 1));
- bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
- >> lg_bits_per_group)];
- unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
- bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
- assert(group_nmask <= BITMAP_GROUP_NBITS);
- bitmap_t group_mask = ~((1LU << group_nmask) - 1);
- bitmap_t group_masked = group & group_mask;
- if (group_masked == 0LU) {
- if (group == 0LU) {
- return binfo->nbits;
- }
- /*
- * min_bit was preceded by one or more unset bits in
- * this group, but there are no other unset bits in this
- * group. Try again starting at the first bit of the
- * next sibling. This will recurse at most once per
- * non-root level.
- */
- size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
- assert(sib_base > min_bit);
- assert(sib_base > bit);
- if (sib_base >= binfo->nbits) {
- return binfo->nbits;
- }
- return bitmap_ffu(bitmap, binfo, sib_base);
- }
- bit += ((size_t)ffs_lu(group_masked)) <<
- (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
- }
- assert(bit >= min_bit);
- assert(bit < binfo->nbits);
- return bit;
-#else
- size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
- bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
- - 1);
- size_t bit;
- do {
- if (g != 0) {
- bit = ffs_lu(g);
- return (i << LG_BITMAP_GROUP_NBITS) + bit;
- }
- i++;
- g = bitmap[i];
- } while (i < binfo->ngroups);
- return binfo->nbits;
-#endif
-}
-
-/* sfu: set first unset. */
-static inline size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
- size_t bit;
- bitmap_t g;
- unsigned i;
-
- assert(!bitmap_full(bitmap, binfo));
-
-#ifdef BITMAP_USE_TREE
- i = binfo->nlevels - 1;
- g = bitmap[binfo->levels[i].group_offset];
- bit = ffs_lu(g);
- while (i > 0) {
- i--;
- g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
- }
-#else
- i = 0;
- g = bitmap[0];
- while (g == 0) {
- i++;
- g = bitmap[i];
- }
- bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
-#endif
- bitmap_set(bitmap, binfo, bit);
- return bit;
-}
-
-static inline void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
- size_t goff;
- bitmap_t *gp;
- bitmap_t g;
- UNUSED bool propagate;
-
- assert(bit < binfo->nbits);
- assert(bitmap_get(bitmap, binfo, bit));
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[goff];
- g = *gp;
- propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef BITMAP_USE_TREE
- /* Propagate group state transitions up the tree. */
- if (propagate) {
- unsigned i;
- for (i = 1; i < binfo->nlevels; i++) {
- bit = goff;
- goff = bit >> LG_BITMAP_GROUP_NBITS;
- gp = &bitmap[binfo->levels[i].group_offset + goff];
- g = *gp;
- propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
- == 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
- *gp = g;
- if (!propagate) {
- break;
- }
- }
- }
-#endif /* BITMAP_USE_TREE */
-}
-
-#endif /* JEMALLOC_INTERNAL_BITMAP_H */
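
In the non-tree branch of bitmap_sfu above, finding a free position is a linear scan for a nonzero group followed by an ffs within that group. A standalone sketch of that flat search, keeping this bitmap's convention that a 1 bit marks a still-unset (free) position (a GCC/Clang builtin stands in for ffs_lu; the demo_* names and group count are illustrative):

#include <stdio.h>

#define DEMO_NGROUPS 4
#define DEMO_GROUP_NBITS (8 * sizeof(unsigned long))

/* Returns the claimed bit index, or the total bit count if nothing is free. */
static size_t
demo_bitmap_sfu(unsigned long *groups) {
    for (size_t i = 0; i < DEMO_NGROUPS; i++) {
        if (groups[i] != 0) {
            size_t bit = (size_t)__builtin_ctzl(groups[i]);
            groups[i] &= ~(1UL << bit);    /* flip it to "set" (in use) */
            return i * DEMO_GROUP_NBITS + bit;
        }
    }
    return DEMO_NGROUPS * DEMO_GROUP_NBITS;
}

int
main(void) {
    unsigned long groups[DEMO_NGROUPS] = {0, 0, 0x30UL, 0};
    printf("first free: %zu\n", demo_bitmap_sfu(groups)); /* bit 4 of group 2 */
    printf("next free:  %zu\n", demo_bitmap_sfu(groups)); /* bit 5 of group 2 */
    return 0;
}
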
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/buf_writer.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/buf_writer.h
deleted file mode 100644
index 37aa6de5..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/buf_writer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
-#define JEMALLOC_INTERNAL_BUF_WRITER_H
-
-/*
- * Note: when using the buffered writer, cbopaque is passed to write_cb only
- * when the buffer is flushed. It would make a difference if cbopaque points
- * to something that's changing for each write_cb call, or something that
- * affects write_cb in a way dependent on the content of the output string.
- * However, the most typical use case in practice is that cbopaque points to
- * some "option-like" content for the write_cb, so it doesn't matter.
- */
-
-typedef struct {
- write_cb_t *write_cb;
- void *cbopaque;
- char *buf;
- size_t buf_size;
- size_t buf_end;
- bool internal_buf;
-} buf_writer_t;
-
-bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
- write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
-void buf_writer_flush(buf_writer_t *buf_writer);
-write_cb_t buf_writer_cb;
-void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
-
-typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
-void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
- void *read_cbopaque);
-
-#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
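
As the comment above notes, the buffered writer only hands cbopaque to write_cb when the buffer is actually flushed. A minimal standalone sketch of that behavior (not the jemalloc implementation; demo_* names are illustrative, and each appended piece is assumed to fit in the buffer):

#include <stdio.h>
#include <string.h>

typedef void (demo_write_cb_t)(void *cbopaque, const char *s);

typedef struct {
    demo_write_cb_t *write_cb;
    void *cbopaque;
    char buf[64];
    size_t buf_end;
} demo_buf_writer_t;

static void
demo_buf_writer_flush(demo_buf_writer_t *w) {
    w->buf[w->buf_end] = '\0';
    w->write_cb(w->cbopaque, w->buf);    /* cbopaque is only seen here */
    w->buf_end = 0;
}

static void
demo_buf_writer_append(demo_buf_writer_t *w, const char *s) {
    size_t len = strlen(s);              /* assumed: len + 1 <= sizeof(buf) */
    if (w->buf_end + len + 1 > sizeof(w->buf)) {
        demo_buf_writer_flush(w);
    }
    memcpy(w->buf + w->buf_end, s, len);
    w->buf_end += len;
}

static void
demo_stderr_cb(void *cbopaque, const char *s) {
    fprintf(stderr, "%s%s", (const char *)cbopaque, s);
}

int
main(void) {
    demo_buf_writer_t w = {demo_stderr_cb, (void *)"[demo] ", {0}, 0};
    demo_buf_writer_append(&w, "hello ");
    demo_buf_writer_append(&w, "world\n");
    demo_buf_writer_flush(&w);
    return 0;
}
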
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/cache_bin.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/cache_bin.h
deleted file mode 100644
index caf5be33..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/cache_bin.h
+++ /dev/null
@@ -1,670 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
-#define JEMALLOC_INTERNAL_CACHE_BIN_H
-
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sz.h"
-
-/*
- * The cache_bins are the mechanism that the tcache and the arena use to
- * communicate. The tcache fills from and flushes to the arena by passing a
- * cache_bin_t to fill/flush. When the arena needs to pull stats from the
- * tcaches associated with it, it does so by iterating over its
- * cache_bin_array_descriptor_t objects and reading out per-bin stats it
- * contains. This makes it so that the arena need not know about the existence
- * of the tcache at all.
- */
-
-/*
- * The size in bytes of each cache bin stack. We also use this to indicate
- * *counts* of individual objects.
- */
-typedef uint16_t cache_bin_sz_t;
-
-/*
- * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
- * bug starts leaking those. Make it look like the junk pattern but be distinct
- * from it.
- */
-static const uintptr_t cache_bin_preceding_junk =
- (uintptr_t)0x7a7a7a7a7a7a7a7aULL;
-/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
-static const uintptr_t cache_bin_trailing_junk =
- (uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
-
-/*
- * That implies the following value, for the maximum number of items in any
- * individual bin. The cache bins track their bounds looking just at the low
- * bits of a pointer, compared against a cache_bin_sz_t. So that's
- * 1 << (sizeof(cache_bin_sz_t) * 8)
- * bytes spread across pointer sized objects to get the maximum.
- */
-#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
- / sizeof(void *) - 1)
-
-/*
- * This lives inside the cache_bin (for locality reasons), and is initialized
- * alongside it, but is otherwise not modified by any cache bin operations.
- * It's logically public and maintained by its callers.
- */
-typedef struct cache_bin_stats_s cache_bin_stats_t;
-struct cache_bin_stats_s {
- /*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
- uint64_t nrequests;
-};
-
-/*
- * Read-only information associated with each element of tcache_t's tbins array
- * is stored separately, mainly to reduce memory usage.
- */
-typedef struct cache_bin_info_s cache_bin_info_t;
-struct cache_bin_info_s {
- cache_bin_sz_t ncached_max;
-};
-
-/*
- * Responsible for caching allocations associated with a single size.
- *
- * Several pointers are used to track the stack. To save on metadata bytes,
- * only the stack_head is a full sized pointer (which is dereferenced on the
- * fastpath), while the others store only the low 16 bits -- this is correct
- * because a single stack never takes more space than 2^16 bytes, and at the
- * same time only equality checks are performed on the low bits.
- *
- * (low addr) (high addr)
- * |------stashed------|------available------|------cached-----|
- * ^ ^ ^ ^
- * low_bound(derived) low_bits_full stack_head low_bits_empty
- */
-typedef struct cache_bin_s cache_bin_t;
-struct cache_bin_s {
- /*
- * The stack grows down. Whenever the bin is nonempty, the head points
- * to an array entry containing a valid allocation. When it is empty,
- * the head points to one element past the owned array.
- */
- void **stack_head;
- /*
- * stack_head and tstats are both modified frequently. Let's keep them
- * close so that they have a higher chance of being on the same
- * cacheline, thus fewer write-backs.
- */
- cache_bin_stats_t tstats;
-
- /*
- * The low bits of the address of the first item in the stack that
- * hasn't been used since the last GC, to track the low water mark (min
- * # of cached items).
- *
- * Since the stack grows down, this is a higher address than
- * low_bits_full.
- */
- uint16_t low_bits_low_water;
-
- /*
- * The low bits of the value that stack_head will take on when the array
- * is full (of cached & stashed items). But remember that stack_head
- * always points to a valid item when the array is nonempty -- that is,
- * it points into the array itself.
- *
- * Recall that since the stack grows down, this is the lowest available
- * address in the array for caching. Only adjusted when stashing items.
- */
- uint16_t low_bits_full;
-
- /*
- * The low bits of the value that stack_head will take on when the array
- * is empty.
- *
- * The stack grows down -- this is one past the highest address in the
- * array. Immutable after initialization.
- */
- uint16_t low_bits_empty;
-};
-
-/*
- * The cache_bins live inside the tcache, but the arena (by design) isn't
- * supposed to know much about tcache internals. To let the arena iterate over
- * associated bins, we keep (with the tcache) a linked list of
- * cache_bin_array_descriptor_ts that tell the arena how to find the bins.
- */
-typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
-struct cache_bin_array_descriptor_s {
- /*
- * The arena keeps a list of the cache bins associated with it, for
- * stats collection.
- */
- ql_elm(cache_bin_array_descriptor_t) link;
- /* Pointers to the tcache bins. */
- cache_bin_t *bins;
-};
-
-static inline void
-cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
- cache_bin_t *bins) {
- ql_elm_new(descriptor, link);
- descriptor->bins = bins;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-cache_bin_nonfast_aligned(const void *ptr) {
- if (!config_uaf_detection) {
- return false;
- }
- /*
- * Currently we use alignment to decide which pointer to junk & stash on
- * dealloc (for catching use-after-free). In some common cases a
- * page-aligned check is needed already (sdalloc w/ config_prof), so we
- * are getting it more or less for free -- no added instructions on
- * free_fastpath.
- *
- * Another way of deciding which pointer to sample is to add another
- * thread_event to pick one every N bytes. That also adds no cost on
- * the fast path; however, it tends to pick large allocations, which is
- * not the desired behavior.
- */
- return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
-}
-
-/* Returns ncached_max: Upper limit on ncached. */
-static inline cache_bin_sz_t
-cache_bin_info_ncached_max(cache_bin_info_t *info) {
- return info->ncached_max;
-}
-
-/*
- * Internal.
- *
- * Asserts that the pointer associated with earlier is <= the one associated
- * with later.
- */
-static inline void
-cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
- if (earlier > later) {
- assert(bin->low_bits_full > bin->low_bits_empty);
- }
-}
-
-/*
- * Internal.
- *
- * Does difference calculations that handle wraparound correctly. Earlier must
- * be associated with the position earlier in memory.
- */
-static inline uint16_t
-cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
- /*
- * When it's racy, bin->low_bits_full can be modified concurrently. It
- * can cross the uint16_t max value and become less than
- * bin->low_bits_empty at the time of the check.
- */
- if (!racy) {
- cache_bin_assert_earlier(bin, earlier, later);
- }
- return later - earlier;
-}
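A standalone illustration of why the unsigned 16-bit subtraction above remains correct when the stack straddles a 64 KiB boundary; the two address values are made up:

#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uint16_t earlier = 0xfff0;  /* low bits of, say, 0x...1fff0 */
    uint16_t later = 0x0010;    /* low bits of, say, 0x...20010 */
    /* Unsigned subtraction wraps mod 2^16, recovering the true span. */
    uint16_t diff = (uint16_t)(later - earlier);
    printf("%u\n", (unsigned)diff);     /* prints 32 */
    return 0;
}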
-
-/*
- * Number of items currently cached in the bin, without checking ncached_max.
- * The caller must specify whether the request is racy (i.e. whether
- * concurrent modifications are possible).
- */
-static inline cache_bin_sz_t
-cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
- cache_bin_sz_t diff = cache_bin_diff(bin,
- (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
- cache_bin_sz_t n = diff / sizeof(void *);
- /*
- * We have undefined behavior here; if this function is called from the
- * arena stats updating code, then stack_head could change from the
- * first line to the next one. Morally, these loads should be atomic,
- * but compilers won't currently generate comparisons with in-memory
- * operands against atomics, and these variables get accessed on the
- * fast paths. This should still be "safe" in the sense of generating
- * the correct assembly for the foreseeable future, though.
- */
- assert(n == 0 || *(bin->stack_head) != NULL || racy);
- return n;
-}
-
-/*
- * Number of items currently cached in the bin, with checking ncached_max. The
- * caller must know that no concurrent modification of the cache_bin is
- * possible.
- */
-static inline cache_bin_sz_t
-cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
- cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
- /* racy */ false);
- assert(n <= cache_bin_info_ncached_max(info));
- return n;
-}
-
-/*
- * Internal.
- *
- * A pointer to the position one past the end of the backing array.
- *
- * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
- * are subject to concurrent modifications.
- */
-static inline void **
-cache_bin_empty_position_get(cache_bin_t *bin) {
- cache_bin_sz_t diff = cache_bin_diff(bin,
- (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
- /* racy */ false);
- uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
- void **ret = (void **)empty_bits;
-
- assert(ret >= bin->stack_head);
-
- return ret;
-}
-
-/*
- * Internal.
- *
- * Calculates low bits of the lower bound of the usable cache bin's range (see
- * cache_bin_t visual representation above).
- *
- * No values are concurrently modified, so it should be safe to read in a
- * multithreaded environment. Currently concurrent access happens only during
- * arena statistics collection.
- */
-static inline uint16_t
-cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
- return (uint16_t)bin->low_bits_empty -
- info->ncached_max * sizeof(void *);
-}
-
-/*
- * Internal.
- *
- * A pointer to the position with the lowest address of the backing array.
- */
-static inline void **
-cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
- cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
- void **ret = cache_bin_empty_position_get(bin) - ncached_max;
- assert(ret <= bin->stack_head);
-
- return ret;
-}
-
-/*
- * As the name implies, asserts that the bin is empty. This is important since
- * it's not correct to try to batch fill a nonempty cache bin.
- */
-static inline void
-cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
- assert(cache_bin_ncached_get_local(bin, info) == 0);
- assert(cache_bin_empty_position_get(bin) == bin->stack_head);
-}
-
-/*
- * Get low water, but without any of the correctness checking we do for the
- * caller-usable version, if we are temporarily breaking invariants (like
- * ncached >= low_water during flush).
- */
-static inline cache_bin_sz_t
-cache_bin_low_water_get_internal(cache_bin_t *bin) {
- return cache_bin_diff(bin, bin->low_bits_low_water,
- bin->low_bits_empty, /* racy */ false) / sizeof(void *);
-}
-
-/* Returns the numeric value of low water in [0, ncached]. */
-static inline cache_bin_sz_t
-cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
- cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
- assert(low_water <= cache_bin_info_ncached_max(info));
- assert(low_water <= cache_bin_ncached_get_local(bin, info));
-
- cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
- bin->low_bits_low_water);
-
- return low_water;
-}
-
-/*
- * Indicates that the current cache bin position should be the low water mark
- * going forward.
- */
-static inline void
-cache_bin_low_water_set(cache_bin_t *bin) {
- bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
-}
-
-static inline void
-cache_bin_low_water_adjust(cache_bin_t *bin) {
- if (cache_bin_ncached_get_internal(bin, /* racy */ false)
- < cache_bin_low_water_get_internal(bin)) {
- cache_bin_low_water_set(bin);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
- /*
- * success (instead of ret) should be checked upon the return of this
- * function. We avoid checking (ret == NULL) because there is never a
- * null stored on the avail stack (which is unknown to the compiler),
- * and eagerly checking ret would cause a pipeline stall (waiting for the
- * cacheline).
- */
-
- /*
- * This may read from the empty position; however the loaded value won't
- * be used. It's safe because the stack has one more slot reserved.
- */
- void *ret = *bin->stack_head;
- uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
- void **new_head = bin->stack_head + 1;
-
- /*
- * Note that the low water mark is at most empty; if we pass this check,
- * we know we're non-empty.
- */
- if (likely(low_bits != bin->low_bits_low_water)) {
- bin->stack_head = new_head;
- *success = true;
- return ret;
- }
- if (!adjust_low_water) {
- *success = false;
- return NULL;
- }
- /*
- * In the fast-path case where we call alloc_easy and then alloc, the
- * previous checking and computation is optimized away -- we didn't
- * actually commit any of our operations.
- */
- if (likely(low_bits != bin->low_bits_empty)) {
- bin->stack_head = new_head;
- bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
- *success = true;
- return ret;
- }
- *success = false;
- return NULL;
-}
-
-/*
- * Allocate an item out of the bin, failing if we're at the low-water mark.
- */
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
- /* We don't look at info if we're not adjusting low-water. */
- return cache_bin_alloc_impl(bin, success, false);
-}
-
-/*
- * Allocate an item out of the bin, even if we're currently at the low-water
- * mark (and failing only if the bin is empty).
- */
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc(cache_bin_t *bin, bool *success) {
- return cache_bin_alloc_impl(bin, success, true);
-}
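A hypothetical caller (assuming the surrounding jemalloc headers), following the convention documented in cache_bin_alloc_impl: check *success rather than the returned pointer, and fall back from alloc_easy to alloc as the fast path described above does:

static void *
bin_alloc_sketch(cache_bin_t *bin) {
    bool success;
    void *ret = cache_bin_alloc_easy(bin, &success);
    if (!success) {
        /* Hit the low-water mark; retry, letting the mark move down.
         * This second call fails only if the bin is truly empty. */
        ret = cache_bin_alloc(bin, &success);
    }
    return success ? ret : NULL;
}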
-
-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
-cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
- cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
- /* racy */ false);
- if (n > num) {
- n = (cache_bin_sz_t)num;
- }
- memcpy(out, bin->stack_head, n * sizeof(void *));
- bin->stack_head += n;
- cache_bin_low_water_adjust(bin);
-
- return n;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-cache_bin_full(cache_bin_t *bin) {
- return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
-}
-
-/*
- * Free an object into the given bin. Fails only if the bin is full.
- */
-JEMALLOC_ALWAYS_INLINE bool
-cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
- if (unlikely(cache_bin_full(bin))) {
- return false;
- }
-
- bin->stack_head--;
- *bin->stack_head = ptr;
- cache_bin_assert_earlier(bin, bin->low_bits_full,
- (uint16_t)(uintptr_t)bin->stack_head);
-
- return true;
-}
-
-/* Returns false if failed to stash (i.e. bin is full). */
-JEMALLOC_ALWAYS_INLINE bool
-cache_bin_stash(cache_bin_t *bin, void *ptr) {
- if (cache_bin_full(bin)) {
- return false;
- }
-
- /* Stash at the full position, in the [full, head) range. */
- uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
- /* Wraparound handled as well. */
- uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
- /* racy */ false);
- *(void **)((uintptr_t)bin->stack_head - diff) = ptr;
-
- assert(!cache_bin_full(bin));
- bin->low_bits_full += sizeof(void *);
- cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
-
- return true;
-}
-
-/*
- * Get the number of stashed pointers.
- *
- * When called from a thread not owning the TLS (i.e. racy = true), it's
- * important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
- * be modified concurrently, so almost no assertions about their values can be
- * made.
- */
-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
-cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
- bool racy) {
- cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
- uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
- info);
-
- cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
- bin->low_bits_full, racy) / sizeof(void *);
- assert(n <= ncached_max);
-
- if (!racy) {
- /* Below are for assertions only. */
- void **low_bound = cache_bin_low_bound_get(bin, info);
-
- assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
- void *stashed = *(low_bound + n - 1);
- bool aligned = cache_bin_nonfast_aligned(stashed);
-#ifdef JEMALLOC_JET
- /* Allow arbitrary pointers to be stashed in tests. */
- aligned = true;
-#endif
- assert(n == 0 || (stashed != NULL && aligned));
- }
-
- return n;
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
-cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
- cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
- /* racy */ false);
- assert(n <= cache_bin_info_ncached_max(info));
- return n;
-}
-
-/*
- * Obtain a racy view of the number of items currently in the cache bin, in the
- * presence of possible concurrent modifications.
- */
-static inline void
-cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
- cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
- cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
- assert(n <= cache_bin_info_ncached_max(info));
- *ncached = n;
-
- n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
- assert(n <= cache_bin_info_ncached_max(info));
- *nstashed = n;
- /* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
-}
-
-/*
- * Filling and flushing are done in batch, on arrays of void *s. For filling,
- * the arrays go forward, and can be accessed with ordinary array arithmetic.
- * For flushing, we work from the end backwards, and so need to use special
- * accessors that invert the usual ordering.
- *
- * This is important for maintaining first-fit; the arena code fills with
- * earliest objects first, and so those are the ones we should return first for
- * cache_bin_alloc calls. When flushing, we should flush the objects that we
- * wish to return later; those at the end of the array. This is better for the
- * first-fit heuristic as well as for cache locality; the most recently freed
- * objects are the ones most likely to still be in cache.
- *
- * This all sounds very hand-wavey and theoretical, but reversing the ordering
- * on one or the other pathway leads to measurable slowdowns.
- */
-
-typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
-struct cache_bin_ptr_array_s {
- cache_bin_sz_t n;
- void **ptr;
-};
-
-/*
- * Declare a cache_bin_ptr_array_t sufficient for nval items.
- *
- * In the current implementation, this could be just part of a
- * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
- * Indirecting behind a macro, though, means experimenting with linked-list
- * representations is easy (since they'll require an alloca in the calling
- * frame).
- */
-#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
- cache_bin_ptr_array_t name; \
- name.n = (nval)
-
-/*
- * Start a fill. The bin must be empty, and this must be followed by a
- * finish_fill call before doing any alloc/dalloc operations on the bin.
- */
-static inline void
-cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
- cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
- cache_bin_assert_empty(bin, info);
- arr->ptr = cache_bin_empty_position_get(bin) - nfill;
-}
-
-/*
- * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
- * fill, nfilled here is the number we actually filled (which may be less, in
- * case of OOM).
- */
-static inline void
-cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
- cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
- cache_bin_assert_empty(bin, info);
- void **empty_position = cache_bin_empty_position_get(bin);
- if (nfilled < arr->n) {
- memmove(empty_position - nfilled, empty_position - arr->n,
- nfilled * sizeof(void *));
- }
- bin->stack_head = empty_position - nfilled;
-}
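A sketch of the fill sequence these helpers imply, as seen from the arena side; obtain_object is a hypothetical stand-in for the real slab machinery:

void *obtain_object(void);      /* hypothetical provider */

static void
bin_fill_sketch(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_sz_t nfill) {
    CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
    cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
    /* Fill forward; stopping early (e.g. on OOM) is allowed. */
    cache_bin_sz_t nfilled = 0;
    while (nfilled < nfill) {
        void *obj = obtain_object();
        if (obj == NULL) {
            break;
        }
        arr.ptr[nfilled++] = obj;
    }
    cache_bin_finish_fill(bin, info, &arr, nfilled);
}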
-
-/*
- * Same deal, but with flush. Unlike fill (which can fail), the user must flush
- * everything we give them.
- */
-static inline void
-cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
- cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
- arr->ptr = cache_bin_empty_position_get(bin) - nflush;
- assert(cache_bin_ncached_get_local(bin, info) == 0
- || *arr->ptr != NULL);
-}
-
-static inline void
-cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
- cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
- unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
- memmove(bin->stack_head + nflushed, bin->stack_head,
- rem * sizeof(void *));
- bin->stack_head = bin->stack_head + nflushed;
- cache_bin_low_water_adjust(bin);
-}
-
-static inline void
-cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
- cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
- cache_bin_sz_t nstashed) {
- assert(nstashed > 0);
- assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
-
- void **low_bound = cache_bin_low_bound_get(bin, info);
- arr->ptr = low_bound;
- assert(*arr->ptr != NULL);
-}
-
-static inline void
-cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
- void **low_bound = cache_bin_low_bound_get(bin, info);
-
- /* Reset the bin local full position. */
- bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
- assert(cache_bin_nstashed_get_local(bin, info) == 0);
-}
-
-/*
- * Initialize a cache_bin_info to represent up to the given number of items in
- * the cache_bins it is associated with.
- */
-void cache_bin_info_init(cache_bin_info_t *bin_info,
- cache_bin_sz_t ncached_max);
-/*
- * Given an array of initialized cache_bin_info_ts, determine how big an
- * allocation is required to initialize a full set of cache_bin_ts.
- */
-void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
- size_t *size, size_t *alignment);
-
-/*
- * Actually initialize some cache bins. Callers should allocate the backing
- * memory indicated by a call to cache_bin_compute_alloc. They should then
- * preincrement, call init once for each bin and info, and then call
- * cache_bin_postincrement. *alloc_cur will then point immediately past the end
- * of the allocation.
- */
-void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
- void *alloc, size_t *cur_offset);
-void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
- void *alloc, size_t *cur_offset);
-void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
- size_t *cur_offset);
-
-/*
- * If a cache bin was zero initialized (either because it lives in static or
- * thread-local storage, or was memset to 0), this function indicates whether or
- * not cache_bin_init was called on it.
- */
-bool cache_bin_still_zero_initialized(cache_bin_t *bin);
-
-#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ckh.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ckh.h
deleted file mode 100644
index 7b3850bc..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ckh.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CKH_H
-#define JEMALLOC_INTERNAL_CKH_H
-
-#include "jemalloc/internal/tsd.h"
-
-/* Cuckoo hashing implementation. Skip to the end for the interface. */
-
-/******************************************************************************/
-/* INTERNAL DEFINITIONS -- IGNORE */
-/******************************************************************************/
-
-/* Maintain counters used to get an idea of performance. */
-/* #define CKH_COUNT */
-/* Print counter values in ckh_delete() (requires CKH_COUNT). */
-/* #define CKH_VERBOSE */
-
-/*
- * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
- * one bucket per L1 cache line.
- */
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, size_t[2]);
-typedef bool ckh_keycomp_t (const void *, const void *);
-
-/* Hash table cell. */
-typedef struct {
- const void *key;
- const void *data;
-} ckhc_t;
-
-/* The hash table itself. */
-typedef struct {
-#ifdef CKH_COUNT
- /* Counters used to get an idea of performance. */
- uint64_t ngrows;
- uint64_t nshrinks;
- uint64_t nshrinkfails;
- uint64_t ninserts;
- uint64_t nrelocs;
-#endif
-
- /* Used for pseudo-random number generation. */
- uint64_t prng_state;
-
- /* Total number of items. */
- size_t count;
-
- /*
- * Minimum and current number of hash table buckets. There are
- * 2^LG_CKH_BUCKET_CELLS cells per bucket.
- */
- unsigned lg_minbuckets;
- unsigned lg_curbuckets;
-
- /* Hash and comparison functions. */
- ckh_hash_t *hash;
- ckh_keycomp_t *keycomp;
-
- /* Hash table with 2^lg_curbuckets buckets. */
- ckhc_t *tab;
-} ckh_t;
-
-/******************************************************************************/
-/* BEGIN PUBLIC API */
-/******************************************************************************/
-
-/* Lifetime management. Minitems is the initial capacity. */
-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
- ckh_keycomp_t *keycomp);
-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
-
-/* Get the number of elements in the set. */
-size_t ckh_count(ckh_t *ckh);
-
-/*
- * To iterate over the elements in the table, initialize *tabind to 0 and call
- * this function until it returns true. Each call that returns false will
- * update *key and *data to the next element in the table, assuming the pointers
- * are non-NULL.
- */
-bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-
-/*
- * Basic hash table operations -- insert, removal, lookup. For ckh_remove and
- * ckh_search, key or data can be NULL. The hash-table only stores pointers to
- * the key and value, and doesn't do any lifetime management.
- */
-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
- void **data);
-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-
-/* Some useful hash and comparison functions for strings and pointers. */
-void ckh_string_hash(const void *key, size_t r_hash[2]);
-bool ckh_string_keycomp(const void *k1, const void *k2);
-void ckh_pointer_hash(const void *key, size_t r_hash[2]);
-bool ckh_pointer_keycomp(const void *k1, const void *k2);
-
-#endif /* JEMALLOC_INTERNAL_CKH_H */
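An illustrative jemalloc-internal use of the API above: build a small set of string keys and walk it. The wrapper function is hypothetical, and error handling follows jemalloc's true-on-failure convention:

static void
ckh_sketch(tsd_t *tsd) {
    ckh_t ckh;
    if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
        return;     /* allocation failure */
    }
    ckh_insert(tsd, &ckh, "alpha", NULL);
    ckh_insert(tsd, &ckh, "beta", NULL);
    assert(ckh_count(&ckh) == 2);

    size_t tabind = 0;
    void *key, *data;
    /* Keep calling until ckh_iter reports that the table is exhausted. */
    while (!ckh_iter(&ckh, &tabind, &key, &data)) {
        /* key is "alpha" or "beta"; data is the NULL we stored with it. */
    }
    ckh_delete(tsd, &ckh);
}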
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/counter.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/counter.h
deleted file mode 100644
index 79abf064..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/counter.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_COUNTER_H
-#define JEMALLOC_INTERNAL_COUNTER_H
-
-#include "jemalloc/internal/mutex.h"
-
-typedef struct counter_accum_s {
- LOCKEDINT_MTX_DECLARE(mtx)
- locked_u64_t accumbytes;
- uint64_t interval;
-} counter_accum_t;
-
-JEMALLOC_ALWAYS_INLINE bool
-counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
- uint64_t interval = counter->interval;
- assert(interval > 0);
- LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
- /*
- * If the event moves fast enough (and/or if the event handling is slow
- * enough), extreme overflow can cause multiple counter triggers to coalesce.
- * This is an intentional mechanism that avoids rate-limiting
- * allocation.
- */
- bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
- &counter->accumbytes, bytes, interval);
- LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
- return overflow;
-}
-
-bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
-void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
-void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
-void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
-
-#endif /* JEMALLOC_INTERNAL_COUNTER_H */
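A sketch of how the accumulator is meant to be driven: initialize it with an interval, feed it byte counts, and act whenever it reports that another interval boundary was crossed. on_interval_event is hypothetical:

void on_interval_event(tsdn_t *tsdn);   /* hypothetical */

/* During setup (a true return means failure):
 *     counter_accum_init(&ctr, 64 * 1024);
 */
static void
counter_sketch(tsdn_t *tsdn, counter_accum_t *ctr, uint64_t bytes) {
    /* True each time accumbytes crosses another multiple of the interval. */
    if (counter_accum(tsdn, ctr, bytes)) {
        on_interval_event(tsdn);
    }
}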
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ctl.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ctl.h
deleted file mode 100644
index 63d27f8a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ctl.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_CTL_H
-#define JEMALLOC_INTERNAL_CTL_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/stats.h"
-
-/* Maximum ctl tree depth. */
-#define CTL_MAX_DEPTH 7
-
-typedef struct ctl_node_s {
- bool named;
-} ctl_node_t;
-
-typedef struct ctl_named_node_s {
- ctl_node_t node;
- const char *name;
- /* If (nchildren == 0), this is a terminal node. */
- size_t nchildren;
- const ctl_node_t *children;
- int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
- size_t);
-} ctl_named_node_t;
-
-typedef struct ctl_indexed_node_s {
- struct ctl_node_s node;
- const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
- size_t);
-} ctl_indexed_node_t;
-
-typedef struct ctl_arena_stats_s {
- arena_stats_t astats;
-
- /* Aggregate stats for small size classes, based on bin stats. */
- size_t allocated_small;
- uint64_t nmalloc_small;
- uint64_t ndalloc_small;
- uint64_t nrequests_small;
- uint64_t nfills_small;
- uint64_t nflushes_small;
-
- bin_stats_data_t bstats[SC_NBINS];
- arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
- pac_estats_t estats[SC_NPSIZES];
- hpa_shard_stats_t hpastats;
- sec_stats_t secstats;
-} ctl_arena_stats_t;
-
-typedef struct ctl_stats_s {
- size_t allocated;
- size_t active;
- size_t metadata;
- size_t metadata_thp;
- size_t resident;
- size_t mapped;
- size_t retained;
-
- background_thread_stats_t background_thread;
- mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
-} ctl_stats_t;
-
-typedef struct ctl_arena_s ctl_arena_t;
-struct ctl_arena_s {
- unsigned arena_ind;
- bool initialized;
- ql_elm(ctl_arena_t) destroyed_link;
-
- /* Basic stats, supported even if !config_stats. */
- unsigned nthreads;
- const char *dss;
- ssize_t dirty_decay_ms;
- ssize_t muzzy_decay_ms;
- size_t pactive;
- size_t pdirty;
- size_t pmuzzy;
-
- /* NULL if !config_stats. */
- ctl_arena_stats_t *astats;
-};
-
-typedef struct ctl_arenas_s {
- uint64_t epoch;
- unsigned narenas;
- ql_head(ctl_arena_t) destroyed;
-
- /*
- * Element 0 corresponds to merged stats for extant arenas (accessed via
- * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
- * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
- * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
- */
- ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
-} ctl_arenas_t;
-
-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen);
-int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
- size_t *miblenp);
-int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
- size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-bool ctl_boot(void);
-void ctl_prefork(tsdn_t *tsdn);
-void ctl_postfork_parent(tsdn_t *tsdn);
-void ctl_postfork_child(tsdn_t *tsdn);
-void ctl_mtx_assert_held(tsdn_t *tsdn);
-
-#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
- if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
- != 0) { \
- malloc_printf( \
- "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
- name); \
- abort(); \
- } \
-} while (0)
-
-#define xmallctlnametomib(name, mibp, miblenp) do { \
- if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
- malloc_printf("<jemalloc>: Failure in " \
- "xmallctlnametomib(\"%s\", ...)\n", name); \
- abort(); \
- } \
-} while (0)
-
-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
- if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
- newlen) != 0) { \
- malloc_write( \
- "<jemalloc>: Failure in xmallctlbymib()\n"); \
- abort(); \
- } \
-} while (0)
-
-#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
- if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
- != 0) { \
- malloc_write( \
- "<jemalloc>: Failure in ctl_mibnametomib()\n"); \
- abort(); \
- } \
-} while (0)
-
-#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
- newp, newlen) do { \
- if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
- oldp, oldlenp, newp, newlen) != 0) { \
- malloc_write( \
- "<jemalloc>: Failure in ctl_bymibname()\n"); \
- abort(); \
- } \
-} while (0)
-
-#endif /* JEMALLOC_INTERNAL_CTL_H */
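A sketch of xmallctl in use; "stats.allocated" is a real mallctl name, while the wrapper function is hypothetical. The x-prefixed macros abort on failure instead of returning an error:

static size_t
allocated_sketch(void) {
    size_t allocated;
    size_t sz = sizeof(allocated);
    xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
    return allocated;
}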
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/decay.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/decay.h
deleted file mode 100644
index cf6a9d22..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/decay.h
+++ /dev/null
@@ -1,186 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DECAY_H
-#define JEMALLOC_INTERNAL_DECAY_H
-
-#include "jemalloc/internal/smoothstep.h"
-
-#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
-
-/*
- * The decay_t computes the number of pages we should purge at any given time.
- * Page allocators inform a decay object when pages enter a decay-able state
- * (i.e. dirty or muzzy), and query it to determine how many pages should be
- * purged at any given time.
- *
- * This is mostly a single-threaded data structure and doesn't care about
- * synchronization at all; it's the caller's responsibility to manage their
- * synchronization on their own. There are two exceptions:
- * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
- * 2) The mtx and purging fields live (and are initialized) here, but are
- * logically owned by the page allocator. This is just a convenience (since
- * those fields would be duplicated for both the dirty and muzzy states
- * otherwise).
- */
-typedef struct decay_s decay_t;
-struct decay_s {
- /* Synchronizes all non-atomic fields. */
- malloc_mutex_t mtx;
- /*
- * True if a thread is currently purging the extents associated with
- * this decay structure.
- */
- bool purging;
- /*
- * Approximate time in milliseconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- atomic_zd_t time_ms;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * The number of pages we cap ourselves at in the current epoch, per
- * decay policies. Updated on an epoch change. After an epoch change,
- * the caller should take steps to try to purge down to this amount.
- */
- size_t npages_limit;
- /*
- * Number of unpurged pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay_*.nunpurged and
- * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
- * if any, were generated.
- */
- size_t nunpurged;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- *
- * Updated only on epoch advance, triggered by
- * decay_maybe_advance_epoch, below.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
-
- /* Peak number of pages in associated extents. Used for debug only. */
- uint64_t ceil_npages;
-};
-
-/*
- * The current decay time setting. This is the only public access to a decay_t
- * that's allowed without holding mtx.
- */
-static inline ssize_t
-decay_ms_read(const decay_t *decay) {
- return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
-
-/*
- * See the comment on the struct field -- the limit on pages we should allow in
- * this decay state this epoch.
- */
-static inline size_t
-decay_npages_limit_get(const decay_t *decay) {
- return decay->npages_limit;
-}
-
-/* How many unused dirty pages were generated during the last epoch. */
-static inline size_t
-decay_epoch_npages_delta(const decay_t *decay) {
- return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
-}
-
-/*
- * Current epoch duration, in nanoseconds. Given that new epochs are started
- * somewhat haphazardly, this is not necessarily exactly the time between any
- * two calls to decay_maybe_advance_epoch; see the comments on fields in the
- * decay_t.
- */
-static inline uint64_t
-decay_epoch_duration_ns(const decay_t *decay) {
- return nstime_ns(&decay->interval);
-}
-
-static inline bool
-decay_immediately(const decay_t *decay) {
- ssize_t decay_ms = decay_ms_read(decay);
- return decay_ms == 0;
-}
-
-static inline bool
-decay_disabled(const decay_t *decay) {
- ssize_t decay_ms = decay_ms_read(decay);
- return decay_ms < 0;
-}
-
-/* Returns true if decay is enabled and done gradually. */
-static inline bool
-decay_gradually(const decay_t *decay) {
- ssize_t decay_ms = decay_ms_read(decay);
- return decay_ms > 0;
-}
-
-/*
- * Returns true if the passed in decay time setting is valid.
- * < -1 : invalid
- * -1 : never decay
- * 0 : decay immediately
- * > 0 : some positive decay time, up to a maximum allowed value of
- * NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
- * 27th century. By that time, we expect to have implemented alternate purging
- * strategies.
- */
-bool decay_ms_valid(ssize_t decay_ms);
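A standalone restatement of that convention, compilable outside jemalloc (long stands in for ssize_t here):

#include <stdio.h>

static const char *
decay_ms_describe(long decay_ms) {
    if (decay_ms < -1) {
        return "invalid";
    } else if (decay_ms == -1) {
        return "never decay";
    } else if (decay_ms == 0) {
        return "decay immediately";
    }
    return "decay gradually";
}

int
main(void) {
    printf("%s\n", decay_ms_describe(10 * 1000));   /* decay gradually */
    return 0;
}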
-
-/*
- * As a precondition, the decay_t must be zeroed out (as if with memset).
- *
- * Returns true on error.
- */
-bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
-
-/*
- * Given an already-initialized decay_t, reinitialize it with the given decay
- * time. The decay_t must have previously been initialized (and should not then
- * be zeroed).
- */
-void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
-
-/*
- * Compute how many of 'npages_new' pages we would need to purge in 'time'.
- */
-uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
- size_t npages_new);
-
-/* Returns true if the epoch advanced and there are pages to purge. */
-bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
- size_t current_npages);
-
-/*
- * Calculates wait time until a number of pages in the interval
- * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
- *
- * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
- * indefinite wait.
- */
-uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
- uint64_t npages_threshold);
-
-#endif /* JEMALLOC_INTERNAL_DECAY_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/div.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/div.h
deleted file mode 100644
index aebae939..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/div.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DIV_H
-#define JEMALLOC_INTERNAL_DIV_H
-
-#include "jemalloc/internal/assert.h"
-
-/*
- * This module does the division that computes the index of a region in a slab,
- * given its offset relative to the base.
- * That is, given a divisor d and an n = i * d (all integers), we'll return i.
- * We do some pre-computation to do this more quickly than a CPU division
- * instruction.
- * We bound n < 2^32, and don't support dividing by one.
- */
-
-typedef struct div_info_s div_info_t;
-struct div_info_s {
- uint32_t magic;
-#ifdef JEMALLOC_DEBUG
- size_t d;
-#endif
-};
-
-void div_init(div_info_t *div_info, size_t divisor);
-
-static inline size_t
-div_compute(div_info_t *div_info, size_t n) {
- assert(n <= (uint32_t)-1);
- /*
- * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
- * the compilers I tried were all smart enough to turn this into the
- * appropriate "get the high 32 bits of the result of a multiply" (e.g.
- * mul; mov edx eax; on x86, umull on arm, etc.).
- */
- size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
-#ifdef JEMALLOC_DEBUG
- assert(i * div_info->d == n);
-#endif
- return i;
-}
-
-#endif /* JEMALLOC_INTERNAL_DIV_H */
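A standalone rendering of the reciprocal trick used by div_compute. The real precomputation lives in div.c; the ceil(2^32 / d) magic below is an assumption that happens to work for this example, not a restatement of that code:

#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uint32_t d = 3;
    uint32_t magic = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
    uint32_t n = 9;     /* must be an exact multiple of d */
    uint32_t i = (uint32_t)(((uint64_t)n * magic) >> 32);
    printf("%u\n", i);  /* prints 3, i.e. 9 / 3 */
    return 0;
}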
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ecache.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ecache.h
deleted file mode 100644
index 71cae3e3..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ecache.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ECACHE_H
-#define JEMALLOC_INTERNAL_ECACHE_H
-
-#include "jemalloc/internal/eset.h"
-#include "jemalloc/internal/san.h"
-#include "jemalloc/internal/mutex.h"
-
-typedef struct ecache_s ecache_t;
-struct ecache_s {
- malloc_mutex_t mtx;
- eset_t eset;
- eset_t guarded_eset;
- /* All stored extents must be in the same state. */
- extent_state_t state;
- /* The index of the ehooks the ecache is associated with. */
- unsigned ind;
- /*
- * If true, delay coalescing until eviction; otherwise coalesce during
- * deallocation.
- */
- bool delay_coalesce;
-};
-
-static inline size_t
-ecache_npages_get(ecache_t *ecache) {
- return eset_npages_get(&ecache->eset) +
- eset_npages_get(&ecache->guarded_eset);
-}
-
-/* Get the number of extents in the given page size index. */
-static inline size_t
-ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
- return eset_nextents_get(&ecache->eset, ind) +
- eset_nextents_get(&ecache->guarded_eset, ind);
-}
-
-/* Get the sum total bytes of the extents in the given page size index. */
-static inline size_t
-ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
- return eset_nbytes_get(&ecache->eset, ind) +
- eset_nbytes_get(&ecache->guarded_eset, ind);
-}
-
-static inline unsigned
-ecache_ind_get(ecache_t *ecache) {
- return ecache->ind;
-}
-
-bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
- unsigned ind, bool delay_coalesce);
-void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
-void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
-void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
-
-#endif /* JEMALLOC_INTERNAL_ECACHE_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata.h
deleted file mode 100644
index af039ea7..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata.h
+++ /dev/null
@@ -1,698 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EDATA_H
-#define JEMALLOC_INTERNAL_EDATA_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bin_info.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/hpdata.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/slab_data.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/typed_list.h"
-
-/*
- * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
- * to free up the low bits in the rtree leaf.
- */
-#define EDATA_ALIGNMENT 128
-
-enum extent_state_e {
- extent_state_active = 0,
- extent_state_dirty = 1,
- extent_state_muzzy = 2,
- extent_state_retained = 3,
- extent_state_transition = 4, /* States below are intermediate. */
- extent_state_merging = 5,
- extent_state_max = 5 /* Sanity checking only. */
-};
-typedef enum extent_state_e extent_state_t;
-
-enum extent_head_state_e {
- EXTENT_NOT_HEAD,
- EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
-};
-typedef enum extent_head_state_e extent_head_state_t;
-
-/*
- * Which implementation of the page allocator interface, (PAI, defined in
- * pai.h) owns the given extent?
- */
-enum extent_pai_e {
- EXTENT_PAI_PAC = 0,
- EXTENT_PAI_HPA = 1
-};
-typedef enum extent_pai_e extent_pai_t;
-
-struct e_prof_info_s {
- /* Time when this was allocated. */
- nstime_t e_prof_alloc_time;
- /* Allocation request size. */
- size_t e_prof_alloc_size;
- /* Points to a prof_tctx_t. */
- atomic_p_t e_prof_tctx;
- /*
- * Points to a prof_recent_t for the allocation; NULL
- * means the recent allocation record no longer exists.
- * Protected by prof_recent_alloc_mtx.
- */
- atomic_p_t e_prof_recent_alloc;
-};
-typedef struct e_prof_info_s e_prof_info_t;
-
-/*
- * The information about a particular edata that lives in an emap. Space is
- * more precious there (the information, plus the edata pointer, has to live in
- * a 64-bit word if we want to enable a packed representation).
- *
- * There are two things that are special about the information here:
- * - It's quicker to access. You have one fewer pointer hop, since finding the
- * edata_t associated with an item always requires accessing the rtree leaf in
- * which this data is stored.
- * - It can be read unsynchronized, and without worrying about lifetime issues.
- */
-typedef struct edata_map_info_s edata_map_info_t;
-struct edata_map_info_s {
- bool slab;
- szind_t szind;
-};
-
-typedef struct edata_cmp_summary_s edata_cmp_summary_t;
-struct edata_cmp_summary_s {
- uint64_t sn;
- uintptr_t addr;
-};
-
-/* Extent (span of pages). Use accessor functions for e_* fields. */
-typedef struct edata_s edata_t;
-ph_structs(edata_avail, edata_t);
-ph_structs(edata_heap, edata_t);
-struct edata_s {
- /*
- * Bitfield containing several fields:
- *
- * a: arena_ind
- * b: slab
- * c: committed
- * p: pai
- * z: zeroed
- * g: guarded
- * t: state
- * i: szind
- * f: nfree
- * s: bin_shard
- *
- * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
- *
- * arena_ind: Arena from which this extent came, or all 1 bits if
- * unassociated.
- *
- * slab: The slab flag indicates whether the extent is used for a slab
- * of small regions. This helps differentiate small size classes,
- * and it indicates whether interior pointers can be looked up via
- * iealloc().
- *
- * committed: The committed flag indicates whether physical memory is
- * committed to the extent, whether explicitly or implicitly
- * as on a system that overcommits and satisfies physical
- * memory needs on demand via soft page faults.
- *
- * pai: The pai flag is an extent_pai_t.
- *
- * zeroed: The zeroed flag is used by extent recycling code to track
- * whether memory is zero-filled.
- *
- * guarded: The guarded flag is used by the sanitizer to track whether
- * the extent has page guards around it.
- *
- * state: The state flag is an extent_state_t.
- *
- * szind: The szind flag indicates usable size class index for
- * allocations residing in this extent, regardless of whether the
- * extent is a slab. Extent size and usable size often differ
- * even for non-slabs, either due to sz_large_pad or promotion of
- * sampled small regions.
- *
- * nfree: Number of free regions in slab.
- *
- * bin_shard: the shard of the bin from which this extent came.
- */
- uint64_t e_bits;
-#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
-
-#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
-#define EDATA_BITS_ARENA_SHIFT 0
-#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
-
-#define EDATA_BITS_SLAB_WIDTH 1
-#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
-#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
-
-#define EDATA_BITS_COMMITTED_WIDTH 1
-#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
-#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
-
-#define EDATA_BITS_PAI_WIDTH 1
-#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
-#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
-
-#define EDATA_BITS_ZEROED_WIDTH 1
-#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
-#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
-
-#define EDATA_BITS_GUARDED_WIDTH 1
-#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
-#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
-
-#define EDATA_BITS_STATE_WIDTH 3
-#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
-#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
-
-#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
-#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
-#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
-
-#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
-#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
-#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
-
-#define EDATA_BITS_BINSHARD_WIDTH 6
-#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
-#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
-
-#define EDATA_BITS_IS_HEAD_WIDTH 1
-#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
-#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
-
- /* Pointer to the extent that this structure is responsible for. */
- void *e_addr;
-
- union {
- /*
- * Extent size and serial number associated with the extent
- * structure (different than the serial number for the extent at
- * e_addr).
- *
- * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
- */
- size_t e_size_esn;
- #define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
- #define EDATA_ESN_MASK ((size_t)PAGE-1)
- /* Base extent size, which may not be a multiple of PAGE. */
- size_t e_bsize;
- };
-
- /*
- * If this edata is a user allocation from an HPA, it comes out of some
- * pageslab (we don't yet support hugepage allocations that don't fit
- * into pageslabs). This tracks it.
- */
- hpdata_t *e_ps;
-
- /*
- * Serial number. These are not necessarily unique; splitting an extent
- * results in two extents with the same serial number.
- */
- uint64_t e_sn;
-
- union {
- /*
- * List linkage used when the edata_t is active; either in
- * arena's large allocations or bin_t's slabs_full.
- */
- ql_elm(edata_t) ql_link_active;
- /*
- * Pairing heap linkage. Used whenever the extent is inactive
- * (in the page allocators), or when it is active and in
- * slabs_nonfull, or when the edata_t is unassociated with an
- * extent and sitting in an edata_cache.
- */
- union {
- edata_heap_link_t heap_link;
- edata_avail_link_t avail_link;
- };
- };
-
- union {
- /*
- * List linkage used when the extent is inactive:
- * - Stashed dirty extents
- * - Ecache LRU functionality.
- */
- ql_elm(edata_t) ql_link_inactive;
- /* Small region slab metadata. */
- slab_data_t e_slab_data;
-
- /* Profiling data, used for large objects. */
- e_prof_info_t e_prof_info;
- };
-};
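A standalone illustration of the MASK()/shift packing used for e_bits. The 3-bit state field at shift 17 matches the widths listed above when MALLOCX_ARENA_BITS is 12; treat the concrete numbers as illustrative:

#include <stdint.h>
#include <stdio.h>

#define MASK(W, S) ((((((uint64_t)0x1U) << (W)) - 1)) << (S))

int
main(void) {
    const unsigned state_shift = 17, state_width = 3;
    uint64_t bits = 0;
    uint64_t state = 2;     /* extent_state_muzzy */
    /* Pack: clear the field, then OR in the shifted value. */
    bits = (bits & ~MASK(state_width, state_shift))
        | (state << state_shift);
    /* Unpack: mask the field out, then shift it back down. */
    uint64_t out = (bits & MASK(state_width, state_shift)) >> state_shift;
    printf("%u\n", (unsigned)out);  /* prints 2 */
    return 0;
}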
-
-TYPED_LIST(edata_list_active, edata_t, ql_link_active)
-TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
-
-static inline unsigned
-edata_arena_ind_get(const edata_t *edata) {
- unsigned arena_ind = (unsigned)((edata->e_bits &
- EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
- assert(arena_ind < MALLOCX_ARENA_LIMIT);
-
- return arena_ind;
-}
-
-static inline szind_t
-edata_szind_get_maybe_invalid(const edata_t *edata) {
- szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
- EDATA_BITS_SZIND_SHIFT);
- assert(szind <= SC_NSIZES);
- return szind;
-}
-
-static inline szind_t
-edata_szind_get(const edata_t *edata) {
- szind_t szind = edata_szind_get_maybe_invalid(edata);
- assert(szind < SC_NSIZES); /* Never call when "invalid". */
- return szind;
-}
-
-static inline size_t
-edata_usize_get(const edata_t *edata) {
- return sz_index2size(edata_szind_get(edata));
-}
-
-static inline unsigned
-edata_binshard_get(const edata_t *edata) {
- unsigned binshard = (unsigned)((edata->e_bits &
- EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
- assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
- return binshard;
-}
-
-static inline uint64_t
-edata_sn_get(const edata_t *edata) {
- return edata->e_sn;
-}
-
-static inline extent_state_t
-edata_state_get(const edata_t *edata) {
- return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
- EDATA_BITS_STATE_SHIFT);
-}
-
-static inline bool
-edata_guarded_get(const edata_t *edata) {
- return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
- EDATA_BITS_GUARDED_SHIFT);
-}
-
-static inline bool
-edata_zeroed_get(const edata_t *edata) {
- return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
- EDATA_BITS_ZEROED_SHIFT);
-}
-
-static inline bool
-edata_committed_get(const edata_t *edata) {
- return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
- EDATA_BITS_COMMITTED_SHIFT);
-}
-
-static inline extent_pai_t
-edata_pai_get(const edata_t *edata) {
- return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
- EDATA_BITS_PAI_SHIFT);
-}
-
-static inline bool
-edata_slab_get(const edata_t *edata) {
- return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
- EDATA_BITS_SLAB_SHIFT);
-}
-
-static inline unsigned
-edata_nfree_get(const edata_t *edata) {
- assert(edata_slab_get(edata));
- return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
- EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void *
-edata_base_get(const edata_t *edata) {
- assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
- !edata_slab_get(edata));
- return PAGE_ADDR2BASE(edata->e_addr);
-}
-
-static inline void *
-edata_addr_get(const edata_t *edata) {
- assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
- !edata_slab_get(edata));
- return edata->e_addr;
-}
-
-static inline size_t
-edata_size_get(const edata_t *edata) {
- return (edata->e_size_esn & EDATA_SIZE_MASK);
-}
-
-static inline size_t
-edata_esn_get(const edata_t *edata) {
- return (edata->e_size_esn & EDATA_ESN_MASK);
-}
-
-static inline size_t
-edata_bsize_get(const edata_t *edata) {
- return edata->e_bsize;
-}
-
-static inline hpdata_t *
-edata_ps_get(const edata_t *edata) {
- assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
- return edata->e_ps;
-}
-
-static inline void *
-edata_before_get(const edata_t *edata) {
- return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
-}
-
-static inline void *
-edata_last_get(const edata_t *edata) {
- return (void *)((uintptr_t)edata_base_get(edata) +
- edata_size_get(edata) - PAGE);
-}
-
-static inline void *
-edata_past_get(const edata_t *edata) {
- return (void *)((uintptr_t)edata_base_get(edata) +
- edata_size_get(edata));
-}
-
-static inline slab_data_t *
-edata_slab_data_get(edata_t *edata) {
- assert(edata_slab_get(edata));
- return &edata->e_slab_data;
-}
-
-static inline const slab_data_t *
-edata_slab_data_get_const(const edata_t *edata) {
- assert(edata_slab_get(edata));
- return &edata->e_slab_data;
-}
-
-static inline prof_tctx_t *
-edata_prof_tctx_get(const edata_t *edata) {
- return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
- ATOMIC_ACQUIRE);
-}
-
-static inline const nstime_t *
-edata_prof_alloc_time_get(const edata_t *edata) {
- return &edata->e_prof_info.e_prof_alloc_time;
-}
-
-static inline size_t
-edata_prof_alloc_size_get(const edata_t *edata) {
- return edata->e_prof_info.e_prof_alloc_size;
-}
-
-static inline prof_recent_t *
-edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
- return (prof_recent_t *)atomic_load_p(
- &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
-}
-
-static inline void
-edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
- ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
-}
-
-static inline void
-edata_binshard_set(edata_t *edata, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
- ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
-}
-
-static inline void
-edata_addr_set(edata_t *edata, void *addr) {
- edata->e_addr = addr;
-}
-
-static inline void
-edata_size_set(edata_t *edata, size_t size) {
- assert((size & ~EDATA_SIZE_MASK) == 0);
- edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
-}
-
-static inline void
-edata_esn_set(edata_t *edata, size_t esn) {
- edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
- EDATA_ESN_MASK);
-}
-
-static inline void
-edata_bsize_set(edata_t *edata, size_t bsize) {
- edata->e_bsize = bsize;
-}
-
-static inline void
-edata_ps_set(edata_t *edata, hpdata_t *ps) {
- assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
- edata->e_ps = ps;
-}
-
-static inline void
-edata_szind_set(edata_t *edata, szind_t szind) {
- assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
- ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
-}
-
-static inline void
-edata_nfree_set(edata_t *edata, unsigned nfree) {
- assert(edata_slab_get(edata));
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
- ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void
-edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
- edata->e_bits = (edata->e_bits &
- (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
- ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
- ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void
-edata_nfree_inc(edata_t *edata) {
- assert(edata_slab_get(edata));
- edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void
-edata_nfree_dec(edata_t *edata) {
- assert(edata_slab_get(edata));
- edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void
-edata_nfree_sub(edata_t *edata, uint64_t n) {
- assert(edata_slab_get(edata));
- edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
-}
-
-static inline void
-edata_sn_set(edata_t *edata, uint64_t sn) {
- edata->e_sn = sn;
-}
-
-static inline void
-edata_state_set(edata_t *edata, extent_state_t state) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
- ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
-}
-
-static inline void
-edata_guarded_set(edata_t *edata, bool guarded) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
- ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
-}
-
-static inline void
-edata_zeroed_set(edata_t *edata, bool zeroed) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
- ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
-}
-
-static inline void
-edata_committed_set(edata_t *edata, bool committed) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
- ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
-}
-
-static inline void
-edata_pai_set(edata_t *edata, extent_pai_t pai) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
- ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
-}
-
-static inline void
-edata_slab_set(edata_t *edata, bool slab) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
- ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
-}
-
-static inline void
-edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
- atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
-}
-
-static inline void
-edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
- nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
-}
-
-static inline void
-edata_prof_alloc_size_set(edata_t *edata, size_t size) {
- edata->e_prof_info.e_prof_alloc_size = size;
-}
-
-static inline void
-edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
- prof_recent_t *recent_alloc) {
- atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
- ATOMIC_RELAXED);
-}
-
-static inline bool
-edata_is_head_get(edata_t *edata) {
- return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
- EDATA_BITS_IS_HEAD_SHIFT);
-}
-
-static inline void
-edata_is_head_set(edata_t *edata, bool is_head) {
- edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
- ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
-}
-
-static inline bool
-edata_state_in_transition(extent_state_t state) {
- return state >= extent_state_transition;
-}
-
-/*
- * Because this function is implemented as a sequence of bitfield modifications,
- * even though each individual bit is properly initialized, we technically read
- * uninitialized data within it. This is mostly fine, since most callers get
- * their edatas from zeroing sources, but callers who make stack edata_ts need
- * to manually zero them.
- */
-static inline void
-edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
- bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
- bool committed, extent_pai_t pai, extent_head_state_t is_head) {
- assert(addr == PAGE_ADDR2BASE(addr) || !slab);
-
- edata_arena_ind_set(edata, arena_ind);
- edata_addr_set(edata, addr);
- edata_size_set(edata, size);
- edata_slab_set(edata, slab);
- edata_szind_set(edata, szind);
- edata_sn_set(edata, sn);
- edata_state_set(edata, state);
- edata_guarded_set(edata, false);
- edata_zeroed_set(edata, zeroed);
- edata_committed_set(edata, committed);
- edata_pai_set(edata, pai);
- edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
- if (config_prof) {
- edata_prof_tctx_set(edata, NULL);
- }
-}
-
-static inline void
-edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
- edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
- edata_addr_set(edata, addr);
- edata_bsize_set(edata, bsize);
- edata_slab_set(edata, false);
- edata_szind_set(edata, SC_NSIZES);
- edata_sn_set(edata, sn);
- edata_state_set(edata, extent_state_active);
- edata_guarded_set(edata, false);
- edata_zeroed_set(edata, true);
- edata_committed_set(edata, true);
- /*
- * This isn't strictly true: base-allocated extents never get
- * deallocated and can't be looked up in the emap, so there's no sense
- * in wasting a state bit to encode this fact.
- */
- edata_pai_set(edata, EXTENT_PAI_PAC);
-}
-
-static inline int
-edata_esn_comp(const edata_t *a, const edata_t *b) {
- size_t a_esn = edata_esn_get(a);
- size_t b_esn = edata_esn_get(b);
-
- return (a_esn > b_esn) - (a_esn < b_esn);
-}
-
-static inline int
-edata_ead_comp(const edata_t *a, const edata_t *b) {
- uintptr_t a_eaddr = (uintptr_t)a;
- uintptr_t b_eaddr = (uintptr_t)b;
-
- return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
-}
-
-static inline edata_cmp_summary_t
-edata_cmp_summary_get(const edata_t *edata) {
- return (edata_cmp_summary_t){edata_sn_get(edata),
- (uintptr_t)edata_addr_get(edata)};
-}
-
-static inline int
-edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
- int ret;
- ret = (a.sn > b.sn) - (a.sn < b.sn);
- if (ret != 0) {
- return ret;
- }
- ret = (a.addr > b.addr) - (a.addr < b.addr);
- return ret;
-}
-
-static inline int
-edata_snad_comp(const edata_t *a, const edata_t *b) {
- edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
- edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
-
- return edata_cmp_summary_comp(a_cmp, b_cmp);
-}
-
-static inline int
-edata_esnead_comp(const edata_t *a, const edata_t *b) {
- int ret;
-
- ret = edata_esn_comp(a, b);
- if (ret != 0) {
- return ret;
- }
-
- ret = edata_ead_comp(a, b);
- return ret;
-}
-
-ph_proto(, edata_avail, edata_t)
-ph_proto(, edata_heap, edata_t)
-
-#endif /* JEMALLOC_INTERNAL_EDATA_H */
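The comment above edata_init() warns that it only rewrites individual bitfields, so a stack-allocated edata_t must be zeroed by the caller first. A minimal sketch of that pattern, using only the accessors declared in this header; the address, size and flag values are illustrative, and EXTENT_NOT_HEAD is assumed to be the non-head enumerator of extent_head_state_t:

    edata_t e;
    /* edata_init() does not fully overwrite e_bits, so zero the struct. */
    memset(&e, 0, sizeof(e));
    edata_init(&e, /* arena_ind */ 0, addr, /* size */ PAGE, /* slab */ false,
        /* szind */ SC_NSIZES, /* sn */ 0, extent_state_active,
        /* zeroed */ false, /* committed */ true, EXTENT_PAI_PAC,
        EXTENT_NOT_HEAD /* assumed enumerator */);
    assert(edata_size_get(&e) == PAGE && !edata_slab_get(&e));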
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata_cache.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata_cache.h
deleted file mode 100644
index 8b6c0ef7..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/edata_cache.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
-#define JEMALLOC_INTERNAL_EDATA_CACHE_H
-
-#include "jemalloc/internal/base.h"
-
-/* For tests only. */
-#define EDATA_CACHE_FAST_FILL 4
-
-/*
- * A cache of edata_t structures allocated via base_alloc_edata (as opposed to
- * the underlying extents they describe). The contents of returned edata_t
- * objects are garbage and cannot be relied upon.
- */
-
-typedef struct edata_cache_s edata_cache_t;
-struct edata_cache_s {
- edata_avail_t avail;
- atomic_zu_t count;
- malloc_mutex_t mtx;
- base_t *base;
-};
-
-bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
-edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
-void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
-
-void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
-void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
-void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
-
-/*
- * An edata_cache_fast is like an edata_cache, but it relies on external
- * synchronization and avoids first-fit strategies.
- */
-
-typedef struct edata_cache_fast_s edata_cache_fast_t;
-struct edata_cache_fast_s {
- edata_list_inactive_t list;
- edata_cache_t *fallback;
- bool disabled;
-};
-
-void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
-edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
-void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
- edata_t *edata);
-void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
-
-#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
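A hedged sketch of the edata_cache lifecycle declared above, assuming the tsdn and base handles come from the surrounding arena bootstrap code and that init follows jemalloc's usual true-on-error convention:

    edata_cache_t cache;
    if (edata_cache_init(&cache, base)) {
        /* Initialization failed; propagate the error. */
        return true;
    }
    edata_t *e = edata_cache_get(tsdn, &cache);
    if (e != NULL) {
        /* Contents of e are garbage until initialized (see comment above). */
        edata_cache_put(tsdn, &cache, e);
    }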
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ehooks.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ehooks.h
deleted file mode 100644
index 8d9513e2..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ehooks.h
+++ /dev/null
@@ -1,412 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EHOOKS_H
-#define JEMALLOC_INTERNAL_EHOOKS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/extent_mmap.h"
-
-/*
- * This module is the internal interface to the extent hooks (both
- * user-specified and external). Eventually, this will give us the flexibility
- * to use multiple different versions of user-visible extent-hook APIs under a
- * single user interface.
- *
- * Current API expansions (not available to anyone but the default hooks yet):
- * - Head state tracking. Hooks can decide whether or not to merge two
- * extents based on whether or not one of them is the head (i.e. was
- * allocated on its own). The later extent loses its "head" status.
- */
-
-extern const extent_hooks_t ehooks_default_extent_hooks;
-
-typedef struct ehooks_s ehooks_t;
-struct ehooks_s {
- /*
- * The user-visible id that goes with the ehooks (i.e. that of the base
- * they're a part of, the associated arena's index within the arenas
- * array).
- */
- unsigned ind;
- /* Logically an extent_hooks_t *. */
- atomic_p_t ptr;
-};
-
-extern const extent_hooks_t ehooks_default_extent_hooks;
-
-/*
- * These are not really part of the public API. Each hook has a fast-path for
- * the default-hooks case that can avoid various small inefficiencies:
- * - Forgetting tsd and then calling tsd_get within the hook.
- * - Getting more state than necessary out of the extent_t.
- * - Doing arena_ind -> arena -> arena_ind lookups.
- * By making the calls to these functions visible to the compiler, it can move
- * those extra bits of computation down below the fast-paths where they get ignored.
- */
-void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
-bool ehooks_default_dalloc_impl(void *addr, size_t size);
-void ehooks_default_destroy_impl(void *addr, size_t size);
-bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
-bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
-#ifdef PAGES_CAN_PURGE_LAZY
-bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
-bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
-#endif
-bool ehooks_default_split_impl();
-/*
- * Merge is the only default extent hook we declare -- see the comment in
- * ehooks_merge.
- */
-bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
- size_t size_a, void *addr_b, size_t size_b, bool committed,
- unsigned arena_ind);
-bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
-void ehooks_default_zero_impl(void *addr, size_t size);
-void ehooks_default_guard_impl(void *guard1, void *guard2);
-void ehooks_default_unguard_impl(void *guard1, void *guard2);
-
-/*
- * We don't officially support reentrancy from within the extent hooks. But
- * various people who sit within throwing distance of the jemalloc team want
- * that functionality in certain limited cases. The default reentrancy guards
- * assert that we're not reentrant from a0 (since it's the bootstrap arena,
- * where reentrant allocations would be redirected), which we would incorrectly
- * trigger in cases where a0 has extent hooks (those hooks themselves can't be
- * reentrant, then, but there are reasonable uses for such functionality, like
- * putting internal metadata on hugepages). Therefore, we use the raw
- * reentrancy guards.
- *
- * Eventually, we need to think more carefully about whether and where we
- * support allocating from within extent hooks (and what that means for things
- * like profiling, stats collection, etc.), and document what the guarantee is.
- */
-static inline void
-ehooks_pre_reentrancy(tsdn_t *tsdn) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- tsd_pre_reentrancy_raw(tsd);
-}
-
-static inline void
-ehooks_post_reentrancy(tsdn_t *tsdn) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- tsd_post_reentrancy_raw(tsd);
-}
-
-/* Beginning of the public API. */
-void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
-
-static inline unsigned
-ehooks_ind_get(const ehooks_t *ehooks) {
- return ehooks->ind;
-}
-
-static inline void
-ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
- atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
-}
-
-static inline extent_hooks_t *
-ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
- return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
-}
-
-static inline bool
-ehooks_are_default(ehooks_t *ehooks) {
- return ehooks_get_extent_hooks_ptr(ehooks) ==
- &ehooks_default_extent_hooks;
-}
-
-/*
- * In some cases, a caller needs to allocate resources before attempting to call
- * a hook. If that hook is doomed to fail, this is wasteful. We therefore
- * include some checks for such cases.
- */
-static inline bool
-ehooks_dalloc_will_fail(ehooks_t *ehooks) {
- if (ehooks_are_default(ehooks)) {
- return opt_retain;
- } else {
- return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
- }
-}
-
-static inline bool
-ehooks_split_will_fail(ehooks_t *ehooks) {
- return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
-}
-
-static inline bool
-ehooks_merge_will_fail(ehooks_t *ehooks) {
- return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
-}
-
-static inline bool
-ehooks_guard_will_fail(ehooks_t *ehooks) {
- /*
- * Before the guard hooks are officially introduced, limit the use to
- * the default hooks only.
- */
- return !ehooks_are_default(ehooks);
-}
-
-/*
- * Some hooks are required to return zeroed memory in certain situations. In
- * debug mode, we do some heuristic checks that they did what they were supposed
- * to.
- *
- * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
- * But incorrect zero information indicates an ehook bug.
- */
-static inline void
-ehooks_debug_zero_check(void *addr, size_t size) {
- assert(((uintptr_t)addr & PAGE_MASK) == 0);
- assert((size & PAGE_MASK) == 0);
- assert(size > 0);
- if (config_debug) {
- /* Check the whole first page. */
- size_t *p = (size_t *)addr;
- for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
- assert(p[i] == 0);
- }
- /*
- * And 4 spots within. There's a tradeoff here; the larger
- * this number, the more likely it is that we'll catch a bug
- * where ehooks return a sparsely non-zero range. But
- * increasing the number of checks also increases the number of
- * page faults in debug mode. FreeBSD does much of their
- * day-to-day development work in debug mode, so we don't want
- * even the debug builds to be too slow.
- */
- const size_t nchecks = 4;
- assert(PAGE >= sizeof(size_t) * nchecks);
- for (size_t i = 0; i < nchecks; ++i) {
- assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
- }
- }
-}
-
-
-static inline void *
-ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit) {
- bool orig_zero = *zero;
- void *ret;
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
- alignment, zero, commit, ehooks_ind_get(ehooks));
- } else {
- ehooks_pre_reentrancy(tsdn);
- ret = extent_hooks->alloc(extent_hooks, new_addr, size,
- alignment, zero, commit, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- }
- assert(new_addr == NULL || ret == NULL || new_addr == ret);
- assert(!orig_zero || *zero);
- if (*zero && ret != NULL) {
- ehooks_debug_zero_check(ret, size);
- }
- return ret;
-}
-
-static inline bool
-ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- bool committed) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- return ehooks_default_dalloc_impl(addr, size);
- } else if (extent_hooks->dalloc == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->dalloc(extent_hooks, addr, size,
- committed, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline void
-ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- bool committed) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- ehooks_default_destroy_impl(addr, size);
- } else if (extent_hooks->destroy == NULL) {
- /* Do nothing. */
- } else {
- ehooks_pre_reentrancy(tsdn);
- extent_hooks->destroy(extent_hooks, addr, size, committed,
- ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- }
-}
-
-static inline bool
-ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- size_t offset, size_t length) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- bool err;
- if (extent_hooks == &ehooks_default_extent_hooks) {
- err = ehooks_default_commit_impl(addr, offset, length);
- } else if (extent_hooks->commit == NULL) {
- err = true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- err = extent_hooks->commit(extent_hooks, addr, size,
- offset, length, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- }
- if (!err) {
- ehooks_debug_zero_check(addr, size);
- }
- return err;
-}
-
-static inline bool
-ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- size_t offset, size_t length) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- return ehooks_default_decommit_impl(addr, offset, length);
- } else if (extent_hooks->decommit == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->decommit(extent_hooks, addr, size,
- offset, length, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline bool
-ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- size_t offset, size_t length) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
-#ifdef PAGES_CAN_PURGE_LAZY
- if (extent_hooks == &ehooks_default_extent_hooks) {
- return ehooks_default_purge_lazy_impl(addr, offset, length);
- }
-#endif
- if (extent_hooks->purge_lazy == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
- offset, length, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline bool
-ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- size_t offset, size_t length) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- /*
- * It would be correct to have an ehooks_debug_zero_check call at the end
- * of this function; purge_forced is required to zero. But checking
- * would touch the page in question, which may have performance
- * consequences (imagine the hooks are using hugepages, with a global
- * zero page off). Even in debug mode, it's usually a good idea to
- * avoid cases that can dramatically increase memory consumption.
- */
-#ifdef PAGES_CAN_PURGE_FORCED
- if (extent_hooks == &ehooks_default_extent_hooks) {
- return ehooks_default_purge_forced_impl(addr, offset, length);
- }
-#endif
- if (extent_hooks->purge_forced == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
- offset, length, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline bool
-ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (ehooks_are_default(ehooks)) {
- return ehooks_default_split_impl();
- } else if (extent_hooks->split == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
- size_b, committed, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline bool
-ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
- } else if (extent_hooks->merge == NULL) {
- return true;
- } else {
- ehooks_pre_reentrancy(tsdn);
- bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
- addr_b, size_b, committed, ehooks_ind_get(ehooks));
- ehooks_post_reentrancy(tsdn);
- return err;
- }
-}
-
-static inline void
-ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
- if (extent_hooks == &ehooks_default_extent_hooks) {
- ehooks_default_zero_impl(addr, size);
- } else {
- /*
- * It would be correct to try using the user-provided purge
- * hooks (since they are required to have zeroed the extent if
- * they indicate success), but we don't necessarily know their
- * cost. We'll be conservative and use memset.
- */
- memset(addr, 0, size);
- }
-}
-
-static inline bool
-ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
- bool err;
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
-
- if (extent_hooks == &ehooks_default_extent_hooks) {
- ehooks_default_guard_impl(guard1, guard2);
- err = false;
- } else {
- err = true;
- }
-
- return err;
-}
-
-static inline bool
-ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
- bool err;
- extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
-
- if (extent_hooks == &ehooks_default_extent_hooks) {
- ehooks_default_unguard_impl(guard1, guard2);
- err = false;
- } else {
- err = true;
- }
-
- return err;
-}
-
-#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
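Per the header comments, callers probe the *_will_fail() predicates before spending work on an operation the installed hooks cannot perform, then go through the inline wrappers rather than the raw hook table. A sketch of that pattern around deallocation; tsdn, ehooks, addr, size and committed are assumed to come from the calling extent code:

    if (ehooks_dalloc_will_fail(ehooks)) {
        /* Don't bother preparing; record the extent for retention instead. */
    } else {
        bool err = ehooks_dalloc(tsdn, ehooks, addr, size, committed);
        if (err) {
            /* The hook declined the deallocation; keep the extent mapped. */
        }
    }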
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emap.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emap.h
deleted file mode 100644
index 847af327..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emap.h
+++ /dev/null
@@ -1,357 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EMAP_H
-#define JEMALLOC_INTERNAL_EMAP_H
-
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/rtree.h"
-
-/*
- * Note: Ends without a semicolon, so that
- * EMAP_DECLARE_RTREE_CTX;
- * in uses will avoid empty-statement warnings.
- */
-#define EMAP_DECLARE_RTREE_CTX \
- rtree_ctx_t rtree_ctx_fallback; \
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
-
-typedef struct emap_s emap_t;
-struct emap_s {
- rtree_t rtree;
-};
-
-/* Used to pass rtree lookup context down the path. */
-typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
-struct emap_alloc_ctx_t {
- szind_t szind;
- bool slab;
-};
-
-typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
-struct emap_full_alloc_ctx_s {
- szind_t szind;
- bool slab;
- edata_t *edata;
-};
-
-bool emap_init(emap_t *emap, base_t *base, bool zeroed);
-
-void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
- bool slab);
-
-void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
- extent_state_t state);
-
-/*
- * The two acquire functions below allow accessing neighbor edatas, if it's safe
- * and valid to do so (i.e. from the same arena, of the same state, etc.). This
- * is necessary because the ecache locks are state based, and only protect
- * edatas with the same state. Therefore the neighbor edata's state needs to be
- * verified first, before chasing the edata pointer. The returned edata will be
- * in an acquired state, meaning other threads will be prevented from accessing
- * it, even if technically the edata can still be discovered from the rtree.
- *
- * This means, at any moment when holding pointers to edata, either one of the
- * state based locks is held (and the edatas are all of the protected state), or
- * the edatas are in an acquired state (e.g. in active or merging state). The
- * acquire operation itself (changing the edata to an acquired state) is done
- * under the state locks.
- */
-edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
- edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
- bool forward);
-edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
- edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
-void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
- extent_state_t new_state);
-
-/*
- * Associate the given edata with its beginning and end address, setting the
- * szind and slab info appropriately.
- * Returns true on error (i.e. resource exhaustion).
- */
-bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
- szind_t szind, bool slab);
-
-/*
- * Does the same thing, but with the interior of the range, for slab
- * allocations.
- *
- * You might wonder why we don't just have a single emap_register function that
- * does both depending on the value of 'slab'. The answer is twofold:
- * - As a practical matter, in places like the extract->split->commit pathway,
- * we defer the interior operation until we're sure that the commit won't fail
- * (but we have to register the split boundaries there).
- * - In general, we're trying to move to a world where the page-specific
- * allocator doesn't know as much about how the pages it allocates will be
- * used, and passing a 'slab' parameter everywhere makes that more
- * complicated.
- *
- * Unlike the boundary version, this function can't fail; this is because slabs
- * can't get big enough to touch a new page that neither of the boundaries
- * touched, so no allocation is necessary to fill the interior once the boundary
- * has been touched.
- */
-void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
- szind_t szind);
-
-void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
-void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
-
-typedef struct emap_prepare_s emap_prepare_t;
-struct emap_prepare_s {
- rtree_leaf_elm_t *lead_elm_a;
- rtree_leaf_elm_t *lead_elm_b;
- rtree_leaf_elm_t *trail_elm_a;
- rtree_leaf_elm_t *trail_elm_b;
-};
-
-/**
- * These functions do the emap metadata management for merging, splitting, and
- * reusing extents. In particular, they set the boundary mappings from
- * addresses to edatas. If the result is going to be used as a slab, you
- * still need to call emap_register_interior on it, though.
- *
- * Remap simply changes the szind and slab status of an extent's boundary
- * mappings. If the extent is not a slab, it doesn't bother with updating the
- * end mapping (since lookups only occur in the interior of an extent for
- * slabs). Since the szind and slab status only make sense for active extents,
- * this should only be called while activating or deactivating an extent.
- *
- * Split and merge have a "prepare" and a "commit" portion. The prepare portion
- * does the operations that can be done without exclusive access to the extent
- * in question, while the commit variant requires exclusive access to maintain
- * the emap invariants. The only function that can fail is emap_split_prepare,
- * and it returns true on failure (at which point the caller shouldn't commit).
- *
- * In all cases, "lead" refers to the lower-addressed extent, and trail to the
- * higher-addressed one. It's the caller's responsibility to set the edata
- * state appropriately.
- */
-bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
- edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
-void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
- edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
-void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
- edata_t *lead, edata_t *trail);
-void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
- edata_t *lead, edata_t *trail);
-
-/* Assert that the emap's view of the given edata matches the edata's view. */
-void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
-static inline void
-emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
- if (config_debug) {
- emap_do_assert_mapped(tsdn, emap, edata);
- }
-}
-
-/* Assert that the given edata isn't in the map. */
-void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
-static inline void
-emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
- if (config_debug) {
- emap_do_assert_not_mapped(tsdn, emap, edata);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
- assert(config_debug);
- emap_assert_mapped(tsdn, emap, edata);
-
- EMAP_DECLARE_RTREE_CTX;
- rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
- (uintptr_t)edata_base_get(edata));
-
- return edata_state_in_transition(contents.metadata.state);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
- if (!config_debug) {
- /* For assertions only. */
- return false;
- }
-
- /*
- * The edata is considered acquired if no other threads will attempt to
- * read / write any fields from it. This includes a few cases:
- *
- * 1) edata not hooked into emap yet -- This implies the edata just got
- * allocated or initialized.
- *
- * 2) in an active or transition state -- In both cases, the edata can
- * be discovered from the emap, however the state tracked in the rtree
- * will prevent other threads from accessing the actual edata.
- */
- EMAP_DECLARE_RTREE_CTX;
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
- rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
- /* init_missing */ false);
- if (elm == NULL) {
- return true;
- }
- rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
- /* dependent */ true);
- if (contents.edata == NULL ||
- contents.metadata.state == extent_state_active ||
- edata_state_in_transition(contents.metadata.state)) {
- return true;
- }
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
- assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
- assert(edata_pai_get(inner) == edata_pai_get(outer));
- assert(edata_committed_get(inner) == edata_committed_get(outer));
- assert(edata_state_get(inner) == extent_state_active);
- assert(edata_state_get(outer) == extent_state_merging);
- assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
- assert(edata_base_get(inner) == edata_past_get(outer) ||
- edata_base_get(outer) == edata_past_get(inner));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
- assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
- assert(edata_pai_get(original) == edata_pai_get(expand));
- assert(edata_state_get(original) == extent_state_active);
- assert(edata_state_get(expand) == extent_state_merging);
- assert(edata_past_get(original) == edata_base_get(expand));
-}
-
-JEMALLOC_ALWAYS_INLINE edata_t *
-emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
- EMAP_DECLARE_RTREE_CTX;
-
- return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
-}
-
-/* Fills in alloc_ctx with the info in the map. */
-JEMALLOC_ALWAYS_INLINE void
-emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
- emap_alloc_ctx_t *alloc_ctx) {
- EMAP_DECLARE_RTREE_CTX;
-
- rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
- rtree_ctx, (uintptr_t)ptr);
- alloc_ctx->szind = metadata.szind;
- alloc_ctx->slab = metadata.slab;
-}
-
-/* The pointer must be mapped. */
-JEMALLOC_ALWAYS_INLINE void
-emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
- emap_full_alloc_ctx_t *full_alloc_ctx) {
- EMAP_DECLARE_RTREE_CTX;
-
- rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
- (uintptr_t)ptr);
- full_alloc_ctx->edata = contents.edata;
- full_alloc_ctx->szind = contents.metadata.szind;
- full_alloc_ctx->slab = contents.metadata.slab;
-}
-
-/*
- * The pointer is allowed to not be mapped.
- *
- * Returns true when the pointer is not present.
- */
-JEMALLOC_ALWAYS_INLINE bool
-emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
- emap_full_alloc_ctx_t *full_alloc_ctx) {
- EMAP_DECLARE_RTREE_CTX;
-
- rtree_contents_t contents;
- bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
- (uintptr_t)ptr, &contents);
- if (err) {
- return true;
- }
- full_alloc_ctx->edata = contents.edata;
- full_alloc_ctx->szind = contents.metadata.szind;
- full_alloc_ctx->slab = contents.metadata.slab;
- return false;
-}
-
-/*
- * Only used on the fast path of free. Returns true when the lookup cannot be
- * fulfilled by the fast path, e.g. when the metadata key is not cached.
- */
-JEMALLOC_ALWAYS_INLINE bool
-emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
- emap_alloc_ctx_t *alloc_ctx) {
- /* Use the unsafe getter since this may get called during exit. */
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
-
- rtree_metadata_t metadata;
- bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
- rtree_ctx, (uintptr_t)ptr, &metadata);
- if (err) {
- return true;
- }
- alloc_ctx->szind = metadata.szind;
- alloc_ctx->slab = metadata.slab;
- return false;
-}
-
-/*
- * We want to do batch lookups out of the cache bins, which use
- * cache_bin_ptr_array_get to access the i'th element of the bin (since they
- * invert usual ordering in deciding what to flush). This lets the emap avoid
- * caring about its caller's ordering.
- */
-typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
-/*
- * This allows size-checking assertions, which we can only do while we're in the
- * process of edata lookups.
- */
-typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
-
-typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
-union emap_batch_lookup_result_u {
- edata_t *edata;
- rtree_leaf_elm_t *rtree_leaf;
-};
-
-JEMALLOC_ALWAYS_INLINE void
-emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
- emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
- emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
- emap_batch_lookup_result_t *result) {
- /* Avoids null-checking tsdn in the loop below. */
- util_assume(tsd != NULL);
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
-
- for (size_t i = 0; i < nptrs; i++) {
- const void *ptr = ptr_getter(ptr_getter_ctx, i);
- /*
- * Reuse the edatas array as a temp buffer, lying a little about
- * the types.
- */
- result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
- &emap->rtree, rtree_ctx, (uintptr_t)ptr,
- /* dependent */ true, /* init_missing */ false);
- }
-
- for (size_t i = 0; i < nptrs; i++) {
- rtree_leaf_elm_t *elm = result[i].rtree_leaf;
- rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
- &emap->rtree, elm, /* dependent */ true);
- result[i].edata = contents.edata;
- emap_full_alloc_ctx_t alloc_ctx;
- /*
- * Not all these fields are read in practice by the metadata
- * visitor. But the compiler can easily optimize away the ones
- * that aren't, so no sense in being incomplete.
- */
- alloc_ctx.szind = contents.metadata.szind;
- alloc_ctx.slab = contents.metadata.slab;
- alloc_ctx.edata = contents.edata;
- metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_EMAP_H */
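The batch-lookup comment above explains that the caller supplies a pointer getter and a metadata visitor so the emap never has to care about the caller's ordering. A sketch of matching callbacks over a plain pointer array; array_ptr_getter and count_slabs_visitor are hypothetical names, and every pointer is assumed to be mapped:

    static const void *
    array_ptr_getter(void *ctx, size_t ind) {
        return ((const void **)ctx)[ind];
    }

    static void
    count_slabs_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
        if (alloc_ctx->slab) {
            (*(size_t *)ctx)++;
        }
    }

    /* Usage, given const void *ptrs[NPTRS] of mapped pointers: */
    size_t nslabs = 0;
    emap_batch_lookup_result_t result[NPTRS];
    emap_edata_lookup_batch(tsd, emap, NPTRS, array_ptr_getter, ptrs,
        count_slabs_visitor, &nslabs, result);
    /* result[i].edata now holds the edata describing ptrs[i]. */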
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emitter.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emitter.h
deleted file mode 100644
index 9482f68b..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/emitter.h
+++ /dev/null
@@ -1,510 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EMITTER_H
-#define JEMALLOC_INTERNAL_EMITTER_H
-
-#include "jemalloc/internal/ql.h"
-
-typedef enum emitter_output_e emitter_output_t;
-enum emitter_output_e {
- emitter_output_json,
- emitter_output_json_compact,
- emitter_output_table
-};
-
-typedef enum emitter_justify_e emitter_justify_t;
-enum emitter_justify_e {
- emitter_justify_left,
- emitter_justify_right,
- /* Not for users; just to pass to internal functions. */
- emitter_justify_none
-};
-
-typedef enum emitter_type_e emitter_type_t;
-enum emitter_type_e {
- emitter_type_bool,
- emitter_type_int,
- emitter_type_int64,
- emitter_type_unsigned,
- emitter_type_uint32,
- emitter_type_uint64,
- emitter_type_size,
- emitter_type_ssize,
- emitter_type_string,
- /*
- * A title is a column title in a table; it's just a string, but it's
- * not quoted.
- */
- emitter_type_title,
-};
-
-typedef struct emitter_col_s emitter_col_t;
-struct emitter_col_s {
- /* Filled in by the user. */
- emitter_justify_t justify;
- int width;
- emitter_type_t type;
- union {
- bool bool_val;
- int int_val;
- unsigned unsigned_val;
- uint32_t uint32_val;
- uint32_t uint32_t_val;
- uint64_t uint64_val;
- uint64_t uint64_t_val;
- size_t size_val;
- ssize_t ssize_val;
- const char *str_val;
- };
-
- /* Filled in by initialization. */
- ql_elm(emitter_col_t) link;
-};
-
-typedef struct emitter_row_s emitter_row_t;
-struct emitter_row_s {
- ql_head(emitter_col_t) cols;
-};
-
-typedef struct emitter_s emitter_t;
-struct emitter_s {
- emitter_output_t output;
- /* The output information. */
- write_cb_t *write_cb;
- void *cbopaque;
- int nesting_depth;
- /* True if we've already emitted a value at the given depth. */
- bool item_at_depth;
- /* True if we emitted a key and will emit corresponding value next. */
- bool emitted_key;
-};
-
-static inline bool
-emitter_outputs_json(emitter_t *emitter) {
- return emitter->output == emitter_output_json ||
- emitter->output == emitter_output_json_compact;
-}
-
-/* Internal convenience function. Write to the emitter the given string. */
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static inline void
-emitter_printf(emitter_t *emitter, const char *format, ...) {
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
- va_end(ap);
-}
-
-static inline const char * JEMALLOC_FORMAT_ARG(3)
-emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
- emitter_justify_t justify, int width) {
- size_t written;
- fmt_specifier++;
- if (justify == emitter_justify_none) {
- written = malloc_snprintf(out_fmt, out_size,
- "%%%s", fmt_specifier);
- } else if (justify == emitter_justify_left) {
- written = malloc_snprintf(out_fmt, out_size,
- "%%-%d%s", width, fmt_specifier);
- } else {
- written = malloc_snprintf(out_fmt, out_size,
- "%%%d%s", width, fmt_specifier);
- }
- /* Only happens in case of bad format string, which *we* choose. */
- assert(written < out_size);
- return out_fmt;
-}
-
-/*
- * Internal. Emit the given value type in the relevant encoding (so that the
- * bool true gets mapped to json "true", but the string "true" gets mapped to
- * json "\"true\"", for instance).
- *
- * Width is ignored if justify is emitter_justify_none.
- */
-static inline void
-emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
- emitter_type_t value_type, const void *value) {
- size_t str_written;
-#define BUF_SIZE 256
-#define FMT_SIZE 10
- /*
- * We dynamically generate a format string to emit, to let us use the
- * snprintf machinery. This is kinda hacky, but gets the job done
- * quickly without having to think about the various snprintf edge
- * cases.
- */
- char fmt[FMT_SIZE];
- char buf[BUF_SIZE];
-
-#define EMIT_SIMPLE(type, format) \
- emitter_printf(emitter, \
- emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \
- *(const type *)value);
-
- switch (value_type) {
- case emitter_type_bool:
- emitter_printf(emitter,
- emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
- *(const bool *)value ? "true" : "false");
- break;
- case emitter_type_int:
- EMIT_SIMPLE(int, "%d")
- break;
- case emitter_type_int64:
- EMIT_SIMPLE(int64_t, "%" FMTd64)
- break;
- case emitter_type_unsigned:
- EMIT_SIMPLE(unsigned, "%u")
- break;
- case emitter_type_ssize:
- EMIT_SIMPLE(ssize_t, "%zd")
- break;
- case emitter_type_size:
- EMIT_SIMPLE(size_t, "%zu")
- break;
- case emitter_type_string:
- str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
- *(const char *const *)value);
- /*
- * We control the strings we output; we shouldn't get anything
- * anywhere near the fmt size.
- */
- assert(str_written < BUF_SIZE);
- emitter_printf(emitter,
- emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
- break;
- case emitter_type_uint32:
- EMIT_SIMPLE(uint32_t, "%" FMTu32)
- break;
- case emitter_type_uint64:
- EMIT_SIMPLE(uint64_t, "%" FMTu64)
- break;
- case emitter_type_title:
- EMIT_SIMPLE(char *const, "%s");
- break;
- default:
- unreachable();
- }
-#undef BUF_SIZE
-#undef FMT_SIZE
-}
-
-
-/* Internal functions. In json mode, tracks nesting state. */
-static inline void
-emitter_nest_inc(emitter_t *emitter) {
- emitter->nesting_depth++;
- emitter->item_at_depth = false;
-}
-
-static inline void
-emitter_nest_dec(emitter_t *emitter) {
- emitter->nesting_depth--;
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_indent(emitter_t *emitter) {
- int amount = emitter->nesting_depth;
- const char *indent_str;
- assert(emitter->output != emitter_output_json_compact);
- if (emitter->output == emitter_output_json) {
- indent_str = "\t";
- } else {
- amount *= 2;
- indent_str = " ";
- }
- for (int i = 0; i < amount; i++) {
- emitter_printf(emitter, "%s", indent_str);
- }
-}
-
-static inline void
-emitter_json_key_prefix(emitter_t *emitter) {
- assert(emitter_outputs_json(emitter));
- if (emitter->emitted_key) {
- emitter->emitted_key = false;
- return;
- }
- if (emitter->item_at_depth) {
- emitter_printf(emitter, ",");
- }
- if (emitter->output != emitter_output_json_compact) {
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
- }
-}
-
-/******************************************************************************/
-/* Public functions for emitter_t. */
-
-static inline void
-emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
- write_cb_t *write_cb, void *cbopaque) {
- emitter->output = emitter_output;
- emitter->write_cb = write_cb;
- emitter->cbopaque = cbopaque;
- emitter->item_at_depth = false;
- emitter->emitted_key = false;
- emitter->nesting_depth = 0;
-}
-
-/******************************************************************************/
-/* JSON public API. */
-
-/*
- * Emits a key (e.g. as appears in an object). The next json entity emitted will
- * be the corresponding value.
- */
-static inline void
-emitter_json_key(emitter_t *emitter, const char *json_key) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "\"%s\":%s", json_key,
- emitter->output == emitter_output_json_compact ? "" : " ");
- emitter->emitted_key = true;
- }
-}
-
-static inline void
-emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
- const void *value) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key_prefix(emitter);
- emitter_print_value(emitter, emitter_justify_none, -1,
- value_type, value);
- emitter->item_at_depth = true;
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_value. */
-static inline void
-emitter_json_kv(emitter_t *emitter, const char *json_key,
- emitter_type_t value_type, const void *value) {
- emitter_json_key(emitter, json_key);
- emitter_json_value(emitter, value_type, value);
-}
-
-static inline void
-emitter_json_array_begin(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "[");
- emitter_nest_inc(emitter);
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
-static inline void
-emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
- emitter_json_key(emitter, json_key);
- emitter_json_array_begin(emitter);
-}
-
-static inline void
-emitter_json_array_end(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- assert(emitter->nesting_depth > 0);
- emitter_nest_dec(emitter);
- if (emitter->output != emitter_output_json_compact) {
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
- }
- emitter_printf(emitter, "]");
- }
-}
-
-static inline void
-emitter_json_object_begin(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "{");
- emitter_nest_inc(emitter);
- }
-}
-
-/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
-static inline void
-emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
- emitter_json_key(emitter, json_key);
- emitter_json_object_begin(emitter);
-}
-
-static inline void
-emitter_json_object_end(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- assert(emitter->nesting_depth > 0);
- emitter_nest_dec(emitter);
- if (emitter->output != emitter_output_json_compact) {
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
- }
- emitter_printf(emitter, "}");
- }
-}
-
-
-/******************************************************************************/
-/* Table public API. */
-
-static inline void
-emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
- if (emitter->output == emitter_output_table) {
- emitter_indent(emitter);
- emitter_printf(emitter, "%s\n", table_key);
- emitter_nest_inc(emitter);
- }
-}
-
-static inline void
-emitter_table_dict_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_table) {
- emitter_nest_dec(emitter);
- }
-}
-
-static inline void
-emitter_table_kv_note(emitter_t *emitter, const char *table_key,
- emitter_type_t value_type, const void *value,
- const char *table_note_key, emitter_type_t table_note_value_type,
- const void *table_note_value) {
- if (emitter->output == emitter_output_table) {
- emitter_indent(emitter);
- emitter_printf(emitter, "%s: ", table_key);
- emitter_print_value(emitter, emitter_justify_none, -1,
- value_type, value);
- if (table_note_key != NULL) {
- emitter_printf(emitter, " (%s: ", table_note_key);
- emitter_print_value(emitter, emitter_justify_none, -1,
- table_note_value_type, table_note_value);
- emitter_printf(emitter, ")");
- }
- emitter_printf(emitter, "\n");
- }
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_table_kv(emitter_t *emitter, const char *table_key,
- emitter_type_t value_type, const void *value) {
- emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
- emitter_type_bool, NULL);
-}
-
-
-/* Write to the emitter the given string, but only in table mode. */
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static inline void
-emitter_table_printf(emitter_t *emitter, const char *format, ...) {
- if (emitter->output == emitter_output_table) {
- va_list ap;
- va_start(ap, format);
- malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
- va_end(ap);
- }
-}
-
-static inline void
-emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
- if (emitter->output != emitter_output_table) {
- return;
- }
- emitter_col_t *col;
- ql_foreach(col, &row->cols, link) {
- emitter_print_value(emitter, col->justify, col->width,
- col->type, (const void *)&col->bool_val);
- }
- emitter_table_printf(emitter, "\n");
-}
-
-static inline void
-emitter_row_init(emitter_row_t *row) {
- ql_new(&row->cols);
-}
-
-static inline void
-emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
- ql_elm_new(col, link);
- ql_tail_insert(&row->cols, col, link);
-}
-
-
-/******************************************************************************/
-/*
- * Generalized public API. Emits using either JSON or table, according to
- * settings in the emitter_t. */
-
-/*
- * Note emits a different kv pair as well, but only in table mode. Omits the
- * note if table_note_key is NULL.
- */
-static inline void
-emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
- emitter_type_t value_type, const void *value,
- const char *table_note_key, emitter_type_t table_note_value_type,
- const void *table_note_value) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key(emitter, json_key);
- emitter_json_value(emitter, value_type, value);
- } else {
- emitter_table_kv_note(emitter, table_key, value_type, value,
- table_note_key, table_note_value_type, table_note_value);
- }
- emitter->item_at_depth = true;
-}
-
-static inline void
-emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
- emitter_type_t value_type, const void *value) {
- emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
- emitter_type_bool, NULL);
-}
-
-static inline void
-emitter_dict_begin(emitter_t *emitter, const char *json_key,
- const char *table_header) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_key(emitter, json_key);
- emitter_json_object_begin(emitter);
- } else {
- emitter_table_dict_begin(emitter, table_header);
- }
-}
-
-static inline void
-emitter_dict_end(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- emitter_json_object_end(emitter);
- } else {
- emitter_table_dict_end(emitter);
- }
-}
-
-static inline void
-emitter_begin(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- assert(emitter->nesting_depth == 0);
- emitter_printf(emitter, "{");
- emitter_nest_inc(emitter);
- } else {
- /*
- * This guarantees that we always call write_cb at least once.
- * This is useful if some invariant is established by each call
- * to write_cb, but doesn't hold initially: e.g., some buffer
- * holds a null-terminated string.
- */
- emitter_printf(emitter, "%s", "");
- }
-}
-
-static inline void
-emitter_end(emitter_t *emitter) {
- if (emitter_outputs_json(emitter)) {
- assert(emitter->nesting_depth == 1);
- emitter_nest_dec(emitter);
- emitter_printf(emitter, "%s", emitter->output ==
- emitter_output_json_compact ? "}" : "\n}\n");
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_EMITTER_H */
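A short usage sketch of the generalized emitter API above: the same call sites produce either JSON or a table depending only on how the emitter was initialized. write_cb and cbopaque are assumed to be the caller's output callback pair, and the value emitted is illustrative:

    emitter_t emitter;
    emitter_init(&emitter, emitter_output_json, write_cb, cbopaque);
    emitter_begin(&emitter);

    unsigned narenas = 4;	/* illustrative value */
    emitter_dict_begin(&emitter, "stats", "Statistics");
    emitter_kv(&emitter, "narenas", "Number of arenas",
        emitter_type_unsigned, &narenas);
    emitter_dict_end(&emitter);

    emitter_end(&emitter);
    /* Initializing with emitter_output_table prints the table form instead. */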
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/eset.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/eset.h
deleted file mode 100644
index 4f689b47..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/eset.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ESET_H
-#define JEMALLOC_INTERNAL_ESET_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/fb.h"
-#include "jemalloc/internal/edata.h"
-#include "jemalloc/internal/mutex.h"
-
-/*
- * An eset ("extent set") is a quantized collection of extents, with a
- * built-in LRU queue.
- *
- * This class is not thread-safe; synchronization must be done externally if
- * there are mutating operations. One exception is the stats counters, which
- * may be read without any locking.
- */
-
-typedef struct eset_bin_s eset_bin_t;
-struct eset_bin_s {
- edata_heap_t heap;
- /*
- * We do first-fit across multiple size classes. If we compared against
- * the min element in each heap directly, we'd take a cache miss per
- * extent we looked at. If we co-locate the edata summaries, we only
- * take a miss on the edata we're actually going to return (which is
- * inevitable anyways).
- */
- edata_cmp_summary_t heap_min;
-};
-
-typedef struct eset_bin_stats_s eset_bin_stats_t;
-struct eset_bin_stats_s {
- atomic_zu_t nextents;
- atomic_zu_t nbytes;
-};
-
-typedef struct eset_s eset_t;
-struct eset_s {
- /* Bitmap for which set bits correspond to non-empty heaps. */
- fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
-
- /* Quantized per size class heaps of extents. */
- eset_bin_t bins[SC_NPSIZES + 1];
-
- eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
-
- /* LRU of all extents in heaps. */
- edata_list_inactive_t lru;
-
- /* Page sum for all extents in heaps. */
- atomic_zu_t npages;
-
- /*
- * A duplication of the data in the containing ecache. We use this only
- * for assertions on the states of the passed-in extents.
- */
- extent_state_t state;
-};
-
-void eset_init(eset_t *eset, extent_state_t state);
-
-size_t eset_npages_get(eset_t *eset);
-/* Get the number of extents in the given page size index. */
-size_t eset_nextents_get(eset_t *eset, pszind_t ind);
-/* Get the sum total bytes of the extents in the given page size index. */
-size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
-
-void eset_insert(eset_t *eset, edata_t *edata);
-void eset_remove(eset_t *eset, edata_t *edata);
-/*
- * Select an extent from this eset of the given size and alignment. Returns
- * null if no such item could be found.
- */
-edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
- unsigned lg_max_fit);
-
-#endif /* JEMALLOC_INTERNAL_ESET_H */
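A hedged sketch of driving the eset API above; as the header comment says, synchronization is the caller's job, so this is assumed to run under the owning ecache's lock. The extent_state_dirty state and the lg_max_fit value are illustrative assumptions:

    eset_t eset;
    eset_init(&eset, extent_state_dirty);	/* state used for assertions only */

    eset_insert(&eset, edata);	/* edata already in the matching state */

    /* First fit: no exact-size requirement, effectively unlimited fit ratio. */
    edata_t *fit = eset_fit(&eset, esize, /* alignment */ PAGE,
        /* exact_only */ false, /* lg_max_fit */ 63);
    if (fit != NULL) {
        eset_remove(&eset, fit);
    }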
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/exp_grow.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/exp_grow.h
deleted file mode 100644
index 8566b8a4..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/exp_grow.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
-#define JEMALLOC_INTERNAL_EXP_GROW_H
-
-typedef struct exp_grow_s exp_grow_t;
-struct exp_grow_s {
- /*
- * Next extent size class in a growing series to use when satisfying a
- * request via the extent hooks (only if opt_retain). This limits the
- * number of disjoint virtual memory ranges so that extent merging can
- * be effective even if multiple arenas' extent allocation requests are
- * highly interleaved.
- *
- * retain_grow_limit is the max allowed size ind to expand (unless the
- * required size is greater). Default is no limit, and controlled
- * through mallctl only.
- */
- pszind_t next;
- pszind_t limit;
-};
-
-static inline bool
-exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
- size_t *r_alloc_size, pszind_t *r_skip) {
- *r_skip = 0;
- *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
- while (*r_alloc_size < alloc_size_min) {
- (*r_skip)++;
- if (exp_grow->next + *r_skip >=
- sz_psz2ind(SC_LARGE_MAXCLASS)) {
- /* Outside legal range. */
- return true;
- }
- *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
- }
- return false;
-}
-
-static inline void
-exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
- if (exp_grow->next + skip + 1 <= exp_grow->limit) {
- exp_grow->next += skip + 1;
- } else {
- exp_grow->next = exp_grow->limit;
- }
-
-}
-
-void exp_grow_init(exp_grow_t *exp_grow);
-
-#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
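The prepare/commit split above is meant to be used as a pair: prepare picks the first size in the growing series that covers the request, and commit advances the series only after the allocation actually succeeded. A sketch under that assumption; exp_grow is an exp_grow_t owned by the caller and try_extent_alloc is a hypothetical stand-in for the real allocation path:

    size_t alloc_size;
    pszind_t skip;
    if (exp_grow_size_prepare(&exp_grow, alloc_size_min, &alloc_size, &skip)) {
        return NULL;	/* Even the largest legal size class is too small. */
    }
    void *mem = try_extent_alloc(alloc_size);	/* hypothetical helper */
    if (mem != NULL) {
        /* Advance the series only once the grown allocation succeeded. */
        exp_grow_size_commit(&exp_grow, skip);
    }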
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent.h
deleted file mode 100644
index 1d51d410..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent.h
+++ /dev/null
@@ -1,137 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_H
-#define JEMALLOC_INTERNAL_EXTENT_H
-
-#include "jemalloc/internal/ecache.h"
-#include "jemalloc/internal/ehooks.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
-
-/*
- * This module contains the page-level allocator. It chooses the addresses that
- * allocations requested by other modules will inhabit, and updates the global
- * metadata to reflect allocation/deallocation/purging decisions.
- */
-
-/*
- * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
- * is the max ratio between the size of the active extent and the new extent.
- */
-#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
-extern size_t opt_lg_extent_max_active_fit;
-
-edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
- bool zero, bool guarded);
-edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
- bool zero, bool guarded);
-void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- ecache_t *ecache, edata_t *edata);
-edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- ecache_t *ecache, size_t npages_min);
-
-void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
-void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
- edata_t *edata);
-void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- edata_t *edata);
-edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
- bool growing_retained);
-void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- edata_t *edata);
-void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- edata_t *edata);
-bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- size_t offset, size_t length);
-bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- size_t offset, size_t length);
-bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- size_t offset, size_t length);
-bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- size_t offset, size_t length);
-edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
- ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
- bool holding_core_locks);
-bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
- edata_t *a, edata_t *b);
-bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- bool commit, bool zero, bool growing_retained);
-size_t extent_sn_next(pac_t *pac);
-bool extent_boot(void);
-
-JEMALLOC_ALWAYS_INLINE bool
-extent_neighbor_head_state_mergeable(bool edata_is_head,
- bool neighbor_is_head, bool forward) {
- /*
-	 * Head state checking: disallow merging if the higher-address extent is a
-	 * head extent.  This helps preserve first-fit, and more importantly
-	 * ensures that no merge happens across arenas.
- */
- if (forward) {
- if (neighbor_is_head) {
- return false;
- }
- } else {
- if (edata_is_head) {
- return false;
- }
- }
- return true;
-}
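To make the head-state rule above concrete, a few illustrative calls and their expected results (a sketch; it assumes this header and its dependencies are included, and uses assert() purely to state the expected outcomes):

/* Forward merge (into the next extent) is refused only when that neighbor is a head. */
assert(extent_neighbor_head_state_mergeable(false, false, /* forward */ true));
assert(!extent_neighbor_head_state_mergeable(false, true, /* forward */ true));
/* Backward merge (into the previous extent) is refused only when we are a head. */
assert(!extent_neighbor_head_state_mergeable(true, false, /* forward */ false));
assert(extent_neighbor_head_state_mergeable(false, true, /* forward */ false));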
-
-JEMALLOC_ALWAYS_INLINE bool
-extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
- extent_pai_t pai, extent_state_t expected_state, bool forward,
- bool expanding) {
- edata_t *neighbor = contents.edata;
- if (neighbor == NULL) {
- return false;
- }
- /* It's not safe to access *neighbor yet; must verify states first. */
- bool neighbor_is_head = contents.metadata.is_head;
- if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
- neighbor_is_head, forward)) {
- return false;
- }
- extent_state_t neighbor_state = contents.metadata.state;
- if (pai == EXTENT_PAI_PAC) {
- if (neighbor_state != expected_state) {
- return false;
- }
- /* From this point, it's safe to access *neighbor. */
- if (!expanding && (edata_committed_get(edata) !=
- edata_committed_get(neighbor))) {
- /*
- * Some platforms (e.g. Windows) require an explicit
- * commit step (and writing to uncommitted memory is not
- * allowed).
- */
- return false;
- }
- } else {
- if (neighbor_state == extent_state_active) {
- return false;
- }
- /* From this point, it's safe to access *neighbor. */
- }
-
- assert(edata_pai_get(edata) == pai);
- if (edata_pai_get(neighbor) != pai) {
- return false;
- }
- if (opt_retain) {
- assert(edata_arena_ind_get(edata) ==
- edata_arena_ind_get(neighbor));
- } else {
- if (edata_arena_ind_get(edata) !=
- edata_arena_ind_get(neighbor)) {
- return false;
- }
- }
- assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
-
- return true;
-}
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_dss.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_dss.h
deleted file mode 100644
index e8f02ce2..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_dss.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
-#define JEMALLOC_INTERNAL_EXTENT_DSS_H
-
-typedef enum {
- dss_prec_disabled = 0,
- dss_prec_primary = 1,
- dss_prec_secondary = 2,
-
- dss_prec_limit = 3
-} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
-
-extern const char *dss_prec_names[];
-
-extern const char *opt_dss;
-
-dss_prec_t extent_dss_prec_get(void);
-bool extent_dss_prec_set(dss_prec_t dss_prec);
-void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit);
-bool extent_in_dss(void *addr);
-bool extent_dss_mergeable(void *addr_a, void *addr_b);
-void extent_dss_boot(void);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
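For context, the dss precedence above is normally selected through jemalloc's option string rather than by calling these functions directly; a minimal sketch follows (the `dss` option name and the `malloc_conf` global are per jemalloc's public documentation, but treat the exact spelling here as an assumption):

/* Prefer sbrk()-backed (dss) extents over mmap for new allocations. */
const char *malloc_conf = "dss:primary";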
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_mmap.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_mmap.h
deleted file mode 100644
index 55f17ee4..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/extent_mmap.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
-
-extern bool opt_retain;
-
-void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit);
-bool extent_dalloc_mmap(void *addr, size_t size);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fb.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fb.h
deleted file mode 100644
index 90c4091f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fb.h
+++ /dev/null
@@ -1,373 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_FB_H
-#define JEMALLOC_INTERNAL_FB_H
-
-/*
- * The flat bitmap module. This has a larger API relative to the bitmap module
- * (supporting things like backwards searches, and searching for both set and
- * unset bits), at the cost of slower operations for very large bitmaps.
- *
- * Initialized flat bitmaps start at all-zeros (all bits unset).
- */
-
-typedef unsigned long fb_group_t;
-#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
-#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
- + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
-
-static inline void
-fb_init(fb_group_t *fb, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- memset(fb, 0, ngroups * sizeof(fb_group_t));
-}
-
-static inline bool
-fb_empty(fb_group_t *fb, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- for (size_t i = 0; i < ngroups; i++) {
- if (fb[i] != 0) {
- return false;
- }
- }
- return true;
-}
-
-static inline bool
-fb_full(fb_group_t *fb, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- size_t trailing_bits = nbits % FB_GROUP_BITS;
- size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
- for (size_t i = 0; i < limit; i++) {
- if (fb[i] != ~(fb_group_t)0) {
- return false;
- }
- }
- if (trailing_bits == 0) {
- return true;
- }
- return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
-}
-
-static inline bool
-fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
- assert(bit < nbits);
- size_t group_ind = bit / FB_GROUP_BITS;
- size_t bit_ind = bit % FB_GROUP_BITS;
- return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
-}
-
-static inline void
-fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
- assert(bit < nbits);
- size_t group_ind = bit / FB_GROUP_BITS;
- size_t bit_ind = bit % FB_GROUP_BITS;
- fb[group_ind] |= ((fb_group_t)1 << bit_ind);
-}
-
-static inline void
-fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
- assert(bit < nbits);
- size_t group_ind = bit / FB_GROUP_BITS;
- size_t bit_ind = bit % FB_GROUP_BITS;
- fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
-}
-
-
-/*
- * Some implementation details. This visitation function lets us apply a group
- * visitor to each group in the bitmap (potentially modifying it). The mask
- * indicates which bits are logically part of the visitation.
- */
-typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
-JEMALLOC_ALWAYS_INLINE void
-fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
- size_t start, size_t cnt) {
- assert(cnt > 0);
- assert(start + cnt <= nbits);
- size_t group_ind = start / FB_GROUP_BITS;
- size_t start_bit_ind = start % FB_GROUP_BITS;
- /*
- * The first group is special; it's the only one we don't start writing
- * to from bit 0.
- */
- size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
- ? FB_GROUP_BITS - start_bit_ind : cnt);
- /*
- * We can basically split affected words into:
- * - The first group, where we touch only the high bits
- * - The last group, where we touch only the low bits
- * - The middle, where we set all the bits to the same thing.
- * We treat each case individually. The last two could be merged, but
- * this can lead to bad codegen for those middle words.
- */
- /* First group */
- fb_group_t mask = ((~(fb_group_t)0)
- >> (FB_GROUP_BITS - first_group_cnt))
- << start_bit_ind;
- visit(ctx, &fb[group_ind], mask);
-
- cnt -= first_group_cnt;
- group_ind++;
- /* Middle groups */
- while (cnt > FB_GROUP_BITS) {
- visit(ctx, &fb[group_ind], ~(fb_group_t)0);
- cnt -= FB_GROUP_BITS;
- group_ind++;
- }
- /* Last group */
- if (cnt != 0) {
- mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
- visit(ctx, &fb[group_ind], mask);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
- bool val = *(bool *)ctx;
- if (val) {
- *fb |= mask;
- } else {
- *fb &= ~mask;
- }
-}
-
-/* Sets the cnt bits starting at position start. Must not have a 0 count. */
-static inline void
-fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
- bool val = true;
- fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
-}
-
-/* Unsets the cnt bits starting at position start. Must not have a 0 count. */
-static inline void
-fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
- bool val = false;
- fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
- size_t *scount = (size_t *)ctx;
- *scount += popcount_lu(*fb & mask);
-}
-
-/* Finds the number of set bits in the range of length cnt starting at start. */
-JEMALLOC_ALWAYS_INLINE size_t
-fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
- size_t scount = 0;
- fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
- return scount;
-}
-
-/* Finds the number of unset bits in the range of length cnt starting at start. */
-JEMALLOC_ALWAYS_INLINE size_t
-fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
- size_t scount = fb_scount(fb, nbits, start, cnt);
- return cnt - scount;
-}
-
-/*
- * An implementation detail; find the first bit at position >= start (or the
- * last bit at position <= start, when searching backwards) with the value val.
- *
- * Returns the number of bits in the bitmap (forward search) or -1 (backward
- * search) if no such bit exists.
- */
-JEMALLOC_ALWAYS_INLINE ssize_t
-fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
- bool forward) {
- assert(start < nbits);
- size_t ngroups = FB_NGROUPS(nbits);
- ssize_t group_ind = start / FB_GROUP_BITS;
- size_t bit_ind = start % FB_GROUP_BITS;
-
- fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
-
- fb_group_t group = fb[group_ind];
- group ^= maybe_invert;
- if (forward) {
- /* Only keep ones in bits bit_ind and above. */
- group &= ~((1LU << bit_ind) - 1);
- } else {
- /*
- * Only keep ones in bits bit_ind and below. You might more
- * naturally express this as (1 << (bit_ind + 1)) - 1, but
- * that shifts by an invalid amount if bit_ind is one less than
- * FB_GROUP_BITS.
- */
- group &= ((2LU << bit_ind) - 1);
- }
- ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
- while (group == 0) {
- group_ind += forward ? 1 : -1;
- if (group_ind == group_ind_bound) {
- return forward ? (ssize_t)nbits : (ssize_t)-1;
- }
- group = fb[group_ind];
- group ^= maybe_invert;
- }
- assert(group != 0);
- size_t bit = forward ? ffs_lu(group) : fls_lu(group);
- size_t pos = group_ind * FB_GROUP_BITS + bit;
- /*
- * The high bits of a partially filled last group are zeros, so if we're
- * looking for zeros we don't want to report an invalid result.
- */
- if (forward && !val && pos > nbits) {
- return nbits;
- }
- return pos;
-}
-
-/*
- * Find the first unset bit in the bitmap with an index >= min_bit. Returns the
- * number of bits in the bitmap if no such bit exists.
- */
-static inline size_t
-fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
- return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
- /* forward */ true);
-}
-
-/* The same, but looks for a set bit. */
-static inline size_t
-fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
- return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
- /* forward */ true);
-}
-
-/*
- * Find the last unset bit in the bitmap with an index <= max_bit. Returns -1 if
- * no such bit exists.
- */
-static inline ssize_t
-fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
- return fb_find_impl(fb, nbits, max_bit, /* val */ false,
- /* forward */ false);
-}
-
-/* The same, but looks for a set bit. */
-static inline ssize_t
-fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
- return fb_find_impl(fb, nbits, max_bit, /* val */ true,
- /* forward */ false);
-}
-
-/* Returns whether or not we found a range. */
-JEMALLOC_ALWAYS_INLINE bool
-fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
- size_t *r_len, bool val, bool forward) {
- assert(start < nbits);
- ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
- if ((forward && next_range_begin == (ssize_t)nbits)
- || (!forward && next_range_begin == (ssize_t)-1)) {
- return false;
- }
- /* Half open range; the set bits are [begin, end). */
- ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
- forward);
- if (forward) {
- *r_begin = next_range_begin;
- *r_len = next_range_end - next_range_begin;
- } else {
- *r_begin = next_range_end + 1;
- *r_len = next_range_begin - next_range_end;
- }
- return true;
-}
-
-/*
- * Used to iterate through ranges of set bits.
- *
- * Tries to find the next contiguous sequence of set bits with a first index >=
- * start. If one exists, puts the earliest bit of the range in *r_begin, its
- * length in *r_len, and returns true. Otherwise, returns false (without
- * touching *r_begin or *r_len).
- */
-static inline bool
-fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
- size_t *r_len) {
- return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
- /* val */ true, /* forward */ true);
-}
-
-/*
- * The same as fb_srange_iter, but searches backwards from start rather than
- * forwards. (The position returned is still the earliest bit in the range).
- */
-static inline bool
-fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
- size_t *r_len) {
- return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
- /* val */ true, /* forward */ false);
-}
-
-/* Similar to fb_srange_iter, but searches for unset bits. */
-static inline bool
-fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
- size_t *r_len) {
- return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
- /* val */ false, /* forward */ true);
-}
-
-/* Similar to fb_srange_riter, but searches for unset bits. */
-static inline bool
-fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
- size_t *r_len) {
- return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
- /* val */ false, /* forward */ false);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
- size_t begin = 0;
- size_t longest_len = 0;
- size_t len = 0;
- while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
- &len, val, /* forward */ true)) {
- if (len > longest_len) {
- longest_len = len;
- }
- begin += len;
- }
- return longest_len;
-}
-
-static inline size_t
-fb_srange_longest(fb_group_t *fb, size_t nbits) {
- return fb_range_longest_impl(fb, nbits, /* val */ true);
-}
-
-static inline size_t
-fb_urange_longest(fb_group_t *fb, size_t nbits) {
- return fb_range_longest_impl(fb, nbits, /* val */ false);
-}
-
-/*
- * Initializes each bit of dst with the bitwise-AND of the corresponding bits of
- * src1 and src2. All bitmaps must be the same size.
- */
-static inline void
-fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- for (size_t i = 0; i < ngroups; i++) {
- dst[i] = src1[i] & src2[i];
- }
-}
-
-/* Like fb_bit_and, but with bitwise-OR. */
-static inline void
-fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- for (size_t i = 0; i < ngroups; i++) {
- dst[i] = src1[i] | src2[i];
- }
-}
-
-/* Initializes dst bit i to the negation of source bit i. */
-static inline void
-fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
- size_t ngroups = FB_NGROUPS(nbits);
- for (size_t i = 0; i < ngroups; i++) {
- dst[i] = ~src[i];
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_FB_H */
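A minimal usage sketch of the flat bitmap API above. It assumes this header and the jemalloc internals it depends on are available; the bitmap size and the expected values noted in the comments are arbitrary illustrations.

#define SKETCH_NBITS 200	/* hypothetical bitmap size */

static void
fb_usage_sketch(void) {
	fb_group_t fb[FB_NGROUPS(SKETCH_NBITS)];
	fb_init(fb, SKETCH_NBITS);			/* all bits start unset */
	fb_set_range(fb, SKETCH_NBITS, 10, 5);		/* set bits [10, 15) */
	assert(fb_get(fb, SKETCH_NBITS, 12));
	assert(fb_scount(fb, SKETCH_NBITS, 0, SKETCH_NBITS) == 5);

	size_t begin, len;
	if (fb_srange_iter(fb, SKETCH_NBITS, 0, &begin, &len)) {
		/* begin == 10, len == 5 */
	}
}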
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fxp.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fxp.h
deleted file mode 100644
index 415a9828..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/fxp.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_FXP_H
-#define JEMALLOC_INTERNAL_FXP_H
-
-/*
- * A simple fixed-point math implementation, supporting only unsigned values
- * (with overflow being an error).
- *
- * It's not in general safe to use floating point in core code, because various
- * libc implementations we get linked against can assume that malloc won't touch
- * floating point state and call it with an unusual calling convention.
- */
-
-/*
- * High 16 bits are the integer part, low 16 are the fractional part. Or
- * equivalently, repr == 2**16 * val, where we use "val" to refer to the
- * (imaginary) fractional representation of the true value.
- *
- * We pick a uint32_t here since it's convenient in some places to
- * double the representation size (i.e. multiplication and division use
- * 64-bit integer types), and a uint64_t is the largest type we're
- * certain is available.
- */
-typedef uint32_t fxp_t;
-#define FXP_INIT_INT(x) ((x) << 16)
-#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)
-
-/*
- * Amount of precision used in parsing and printing numbers. The integer bound
- * is simply because the integer part of the number gets 16 bits, and so is
- * bounded by 65536.
- *
- * We use a lot of precision for the fractional part, even though most of it
- * gets rounded off; this lets us get exact values for the important special
- * case where the denominator is a small power of 2 (for instance,
- * 1/512 == 0.001953125 is exactly representable even with only 16 bits of
- * fractional precision). We need to left-shift by 16 before dividing by
- * 10**precision, so we pick precision to be floor(log10(2**48)) = 14.
- */
-#define FXP_INTEGER_PART_DIGITS 5
-#define FXP_FRACTIONAL_PART_DIGITS 14
-
-/*
- * In addition to the integer and fractional parts of the number, we need to
- * include a null character and (possibly) a decimal point.
- */
-#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)
-
-static inline fxp_t
-fxp_add(fxp_t a, fxp_t b) {
- return a + b;
-}
-
-static inline fxp_t
-fxp_sub(fxp_t a, fxp_t b) {
- assert(a >= b);
- return a - b;
-}
-
-static inline fxp_t
-fxp_mul(fxp_t a, fxp_t b) {
- uint64_t unshifted = (uint64_t)a * (uint64_t)b;
- /*
- * Unshifted is (a.val * 2**16) * (b.val * 2**16)
- * == (a.val * b.val) * 2**32, but we want
- * (a.val * b.val) * 2 ** 16.
- */
- return (uint32_t)(unshifted >> 16);
-}
-
-static inline fxp_t
-fxp_div(fxp_t a, fxp_t b) {
- assert(b != 0);
- uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
- /*
- * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
- * == (a.val / b.val) * (2 ** 32), which again corresponds to a right
- * shift of 16.
- */
- return (uint32_t)(unshifted >> 16);
-}
-
-static inline uint32_t
-fxp_round_down(fxp_t a) {
- return a >> 16;
-}
-
-static inline uint32_t
-fxp_round_nearest(fxp_t a) {
- uint32_t fractional_part = (a & ((1U << 16) - 1));
- uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
- return (a >> 16) + increment;
-}
-
-/*
- * Approximately computes x * frac, without the size limitations that would be
- * imposed by converting x to an fxp_t.
- */
-static inline size_t
-fxp_mul_frac(size_t x_orig, fxp_t frac) {
- assert(frac <= (1U << 16));
- /*
- * Work around an over-enthusiastic warning about type limits below (on
- * 32-bit platforms, a size_t is always less than 1ULL << 48).
- */
- uint64_t x = (uint64_t)x_orig;
- /*
- * If we can guarantee no overflow, multiply first before shifting, to
- * preserve some precision. Otherwise, shift first and then multiply.
- * In the latter case, we only lose the low 16 bits of a 48-bit number,
- * so we're still accurate to within 1/2**32.
- */
- if (x < (1ULL << 48)) {
- return (size_t)((x * frac) >> 16);
- } else {
- return (size_t)((x >> 16) * (uint64_t)frac);
- }
-}
-
-/*
- * Returns true on error. Otherwise, returns false and updates *end to point to
- * the first character not parsed (because it wasn't a digit).
- */
-bool fxp_parse(fxp_t *a, const char *ptr, char **end);
-void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);
-
-#endif /* JEMALLOC_INTERNAL_FXP_H */
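A short worked example of the 16.16 representation and helpers above (a sketch; the specific values are arbitrary, and assert() is used only to state the expected results):

static void
fxp_worked_example(void) {
	fxp_t half = FXP_INIT_PERCENT(50);	/* repr 0x8000, i.e. 0.5 */
	fxp_t three = FXP_INIT_INT(3);		/* repr 3 << 16 */
	fxp_t prod = fxp_mul(three, half);	/* 1.5, repr 0x18000 */
	assert(fxp_round_down(prod) == 1);
	assert(fxp_round_nearest(prod) == 2);	/* fractional part is exactly .5 */
	assert(fxp_mul_frac(1000, half) == 500);
}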
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hash.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hash.h
deleted file mode 100644
index 7f945679..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hash.h
+++ /dev/null
@@ -1,320 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HASH_H
-#define JEMALLOC_INTERNAL_HASH_H
-
-#include "jemalloc/internal/assert.h"
-
-/*
- * The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
- * details.
- */
-
-/******************************************************************************/
-/* Internal implementation. */
-static inline uint32_t
-hash_rotl_32(uint32_t x, int8_t r) {
- return ((x << r) | (x >> (32 - r)));
-}
-
-static inline uint64_t
-hash_rotl_64(uint64_t x, int8_t r) {
- return ((x << r) | (x >> (64 - r)));
-}
-
-static inline uint32_t
-hash_get_block_32(const uint32_t *p, int i) {
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
- uint32_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
- return ret;
- }
-
- return p[i];
-}
-
-static inline uint64_t
-hash_get_block_64(const uint64_t *p, int i) {
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
- uint64_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
- return ret;
- }
-
- return p[i];
-}
-
-static inline uint32_t
-hash_fmix_32(uint32_t h) {
- h ^= h >> 16;
- h *= 0x85ebca6b;
- h ^= h >> 13;
- h *= 0xc2b2ae35;
- h ^= h >> 16;
-
- return h;
-}
-
-static inline uint64_t
-hash_fmix_64(uint64_t k) {
- k ^= k >> 33;
- k *= KQU(0xff51afd7ed558ccd);
- k ^= k >> 33;
- k *= KQU(0xc4ceb9fe1a85ec53);
- k ^= k >> 33;
-
- return k;
-}
-
-static inline uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed) {
- const uint8_t *data = (const uint8_t *) key;
- const int nblocks = len / 4;
-
- uint32_t h1 = seed;
-
- const uint32_t c1 = 0xcc9e2d51;
- const uint32_t c2 = 0x1b873593;
-
- /* body */
- {
- const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
- int i;
-
- for (i = -nblocks; i; i++) {
- uint32_t k1 = hash_get_block_32(blocks, i);
-
- k1 *= c1;
- k1 = hash_rotl_32(k1, 15);
- k1 *= c2;
-
- h1 ^= k1;
- h1 = hash_rotl_32(h1, 13);
- h1 = h1*5 + 0xe6546b64;
- }
- }
-
- /* tail */
- {
- const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
-
- uint32_t k1 = 0;
-
- switch (len & 3) {
- case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
- case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
- case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
- k1 *= c2; h1 ^= k1;
- }
- }
-
- /* finalization */
- h1 ^= len;
-
- h1 = hash_fmix_32(h1);
-
- return h1;
-}
-
-static inline void
-hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2]) {
- const uint8_t * data = (const uint8_t *) key;
- const int nblocks = len / 16;
-
- uint32_t h1 = seed;
- uint32_t h2 = seed;
- uint32_t h3 = seed;
- uint32_t h4 = seed;
-
- const uint32_t c1 = 0x239b961b;
- const uint32_t c2 = 0xab0e9789;
- const uint32_t c3 = 0x38b34ae5;
- const uint32_t c4 = 0xa1e38b93;
-
- /* body */
- {
- const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
- int i;
-
- for (i = -nblocks; i; i++) {
- uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
- uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
- uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
- uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
-
- k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-
- h1 = hash_rotl_32(h1, 19); h1 += h2;
- h1 = h1*5 + 0x561ccd1b;
-
- k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
- h2 = hash_rotl_32(h2, 17); h2 += h3;
- h2 = h2*5 + 0x0bcaa747;
-
- k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
- h3 = hash_rotl_32(h3, 15); h3 += h4;
- h3 = h3*5 + 0x96cd1c35;
-
- k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
- h4 = hash_rotl_32(h4, 13); h4 += h1;
- h4 = h4*5 + 0x32ac3b17;
- }
- }
-
- /* tail */
- {
- const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
- uint32_t k1 = 0;
- uint32_t k2 = 0;
- uint32_t k3 = 0;
- uint32_t k4 = 0;
-
- switch (len & 15) {
- case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
- case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
- case 13: k4 ^= tail[12] << 0;
- k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
- JEMALLOC_FALLTHROUGH;
- case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
- case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
- case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
- case 9: k3 ^= tail[ 8] << 0;
- k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
- JEMALLOC_FALLTHROUGH;
- case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
- case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
- case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
- case 5: k2 ^= tail[ 4] << 0;
- k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
- JEMALLOC_FALLTHROUGH;
- case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
- case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
- case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
- case 1: k1 ^= tail[ 0] << 0;
- k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
- break;
- }
- }
-
- /* finalization */
- h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
-
- h1 += h2; h1 += h3; h1 += h4;
- h2 += h1; h3 += h1; h4 += h1;
-
- h1 = hash_fmix_32(h1);
- h2 = hash_fmix_32(h2);
- h3 = hash_fmix_32(h3);
- h4 = hash_fmix_32(h4);
-
- h1 += h2; h1 += h3; h1 += h4;
- h2 += h1; h3 += h1; h4 += h1;
-
- r_out[0] = (((uint64_t) h2) << 32) | h1;
- r_out[1] = (((uint64_t) h4) << 32) | h3;
-}
-
-static inline void
-hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2]) {
- const uint8_t *data = (const uint8_t *) key;
- const int nblocks = len / 16;
-
- uint64_t h1 = seed;
- uint64_t h2 = seed;
-
- const uint64_t c1 = KQU(0x87c37b91114253d5);
- const uint64_t c2 = KQU(0x4cf5ad432745937f);
-
- /* body */
- {
- const uint64_t *blocks = (const uint64_t *) (data);
- int i;
-
- for (i = 0; i < nblocks; i++) {
- uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
- uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
-
- k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-
- h1 = hash_rotl_64(h1, 27); h1 += h2;
- h1 = h1*5 + 0x52dce729;
-
- k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
- h2 = hash_rotl_64(h2, 31); h2 += h1;
- h2 = h2*5 + 0x38495ab5;
- }
- }
-
- /* tail */
- {
- const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
- uint64_t k1 = 0;
- uint64_t k2 = 0;
-
- switch (len & 15) {
- case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
- case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
- case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
- case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
- case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
- case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
- case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
- k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
- JEMALLOC_FALLTHROUGH;
- case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
- case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
- case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
- case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
- case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
- case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
- case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
- case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
- k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
- break;
- }
- }
-
- /* finalization */
- h1 ^= len; h2 ^= len;
-
- h1 += h2;
- h2 += h1;
-
- h1 = hash_fmix_64(h1);
- h2 = hash_fmix_64(h2);
-
- h1 += h2;
- h2 += h1;
-
- r_out[0] = h1;
- r_out[1] = h2;
-}
-
-/******************************************************************************/
-/* API. */
-static inline void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
- assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
-#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
- hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
-#else
- {
- uint64_t hashes[2];
- hash_x86_128(key, (int)len, seed, hashes);
- r_hash[0] = (size_t)hashes[0];
- r_hash[1] = (size_t)hashes[1];
- }
-#endif
-}
-
-#endif /* JEMALLOC_INTERNAL_HASH_H */
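A sketch of calling the hashing entry point above (assumes jemalloc internals are available; the key and seed are arbitrary choices):

static void
hash_usage_sketch(void) {
	const char key[] = "example-key";
	size_t r_hash[2];
	hash(key, sizeof(key) - 1, /* seed */ 0x9e3779b9U, r_hash);
	/*
	 * r_hash[0] and r_hash[1] together carry the 128-bit MurmurHash3
	 * result (truncated to the native word size on 32-bit builds).
	 */
}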
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hook.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hook.h
deleted file mode 100644
index ee246b1e..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hook.h
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HOOK_H
-#define JEMALLOC_INTERNAL_HOOK_H
-
-#include "jemalloc/internal/tsd.h"
-
-/*
- * This API is *extremely* experimental, and may get ripped out, changed in API-
- * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
- *
- * It allows hooking the stateful parts of the API to see changes as they
- * happen.
- *
- * Allocation hooks are called after the allocation is done, free hooks are
- * called before the free is done, and expand hooks are called after the
- * allocation is expanded.
- *
- * For realloc and rallocx, if the expansion happens in place, the expansion
- * hook is called. If it is moved, then the alloc hook is called on the new
- * location, and then the free hook is called on the old location (i.e. both
- * hooks are invoked in between the alloc and the dalloc).
- *
- * If we return NULL from OOM, then usize might not be trustworthy. Calling
- * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
- * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0),
- * and only calls the alloc hook).
- *
- * Reentrancy:
- * Reentrancy is guarded against from within the hook implementation. If you
- * call allocator functions from within a hook, the hooks will not be invoked
- * again.
- * Threading:
- * The installation of a hook synchronizes with all its uses. If you can
- * prove the installation of a hook happens-before a jemalloc entry point,
- * then the hook will get invoked (unless there's a racing removal).
- *
- * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
- * allocates and has the alloc hook invoked, then a subsequent free on the
- * same thread will also have the free hook invoked).
- *
- * The *removal* of a hook does *not* block until all threads are done with
- * the hook. Hook authors have to be resilient to this, and need some
- * out-of-band mechanism for cleaning up any dynamically allocated memory
- * associated with their hook.
- * Ordering:
- * Order of hook execution is unspecified, and may be different than insertion
- * order.
- */
-
-#define HOOK_MAX 4
-
-enum hook_alloc_e {
- hook_alloc_malloc,
- hook_alloc_posix_memalign,
- hook_alloc_aligned_alloc,
- hook_alloc_calloc,
- hook_alloc_memalign,
- hook_alloc_valloc,
- hook_alloc_mallocx,
-
- /* The reallocating functions have both alloc and dalloc variants */
- hook_alloc_realloc,
- hook_alloc_rallocx,
-};
-/*
- * We put the enum typedef after the enum, since this file may get included by
- * jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
- */
-typedef enum hook_alloc_e hook_alloc_t;
-
-enum hook_dalloc_e {
- hook_dalloc_free,
- hook_dalloc_dallocx,
- hook_dalloc_sdallocx,
-
- /*
- * The dalloc halves of reallocation (not called if in-place expansion
- * happens).
- */
- hook_dalloc_realloc,
- hook_dalloc_rallocx,
-};
-typedef enum hook_dalloc_e hook_dalloc_t;
-
-
-enum hook_expand_e {
- hook_expand_realloc,
- hook_expand_rallocx,
- hook_expand_xallocx,
-};
-typedef enum hook_expand_e hook_expand_t;
-
-typedef void (*hook_alloc)(
- void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
- uintptr_t args_raw[3]);
-
-typedef void (*hook_dalloc)(
- void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
-
-typedef void (*hook_expand)(
- void *extra, hook_expand_t type, void *address, size_t old_usize,
- size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
-
-typedef struct hooks_s hooks_t;
-struct hooks_s {
- hook_alloc alloc_hook;
- hook_dalloc dalloc_hook;
- hook_expand expand_hook;
- void *extra;
-};
-
-/*
- * Begin implementation details; everything above this point might one day live
- * in a public API. Everything below this point never will.
- */
-
-/*
- * The realloc pathways haven't gotten any refactoring love in a while, and it's
- * fairly difficult to pass information from the entry point to the hooks. We
- * put the information the hooks will need into a struct to encapsulate
- * everything.
- *
- * Much of these pathways are force-inlined, so that the compiler can avoid
- * materializing this struct until we hit an extern arena function. For fairly
- * goofy reasons, *many* of the realloc paths hit an extern arena function.
- * These paths are cold enough that it doesn't matter; eventually, we should
- * rewrite the realloc code to make the expand-in-place and the
- * free-then-realloc paths more orthogonal, at which point we don't need to
- * spread the hook logic all over the place.
- */
-typedef struct hook_ralloc_args_s hook_ralloc_args_t;
-struct hook_ralloc_args_s {
- /* I.e. as opposed to rallocx. */
- bool is_realloc;
- /*
- * The expand hook takes 4 arguments, even if only 3 are actually used;
- * we add an extra one in case the user decides to memcpy without
- * looking too closely at the hooked function.
- */
- uintptr_t args[4];
-};
-
-bool hook_boot();
-
-/*
- * Returns an opaque handle to be used when removing the hook.  NULL means that
- * we couldn't install the hook.
- */
-void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
-/* Uninstalls the hook with the handle previously returned from hook_install. */
-void hook_remove(tsdn_t *tsdn, void *opaque);
-
-/* Hooks */
-
-void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
- uintptr_t args_raw[3]);
-
-void hook_invoke_dalloc(hook_dalloc_t type, void *address,
- uintptr_t args_raw[3]);
-
-void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
- size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
-
-#endif /* JEMALLOC_INTERNAL_HOOK_H */
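A sketch of installing a hook through the API above. It is internal-only (a tsdn_t is required); my_alloc_hook and the surrounding plumbing are hypothetical, and leaving unused members NULL is an assumption rather than documented behavior.

static void
my_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	/* Called after each allocation; must tolerate racing hook removal. */
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
}

static hooks_t my_hooks = {
	my_alloc_hook,	/* alloc_hook */
	NULL,		/* dalloc_hook (assumed skippable) */
	NULL,		/* expand_hook (assumed skippable) */
	NULL,		/* extra */
};

/* With a tsdn_t * in hand:				*/
/*	void *handle = hook_install(tsdn, &my_hooks);	*/
/*	...						*/
/*	if (handle != NULL) hook_remove(tsdn, handle);	*/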
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa.h
deleted file mode 100644
index f3562853..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa.h
+++ /dev/null
@@ -1,182 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HPA_H
-#define JEMALLOC_INTERNAL_HPA_H
-
-#include "jemalloc/internal/exp_grow.h"
-#include "jemalloc/internal/hpa_hooks.h"
-#include "jemalloc/internal/hpa_opts.h"
-#include "jemalloc/internal/pai.h"
-#include "jemalloc/internal/psset.h"
-
-typedef struct hpa_central_s hpa_central_t;
-struct hpa_central_s {
- /*
- * The mutex guarding most of the operations on the central data
- * structure.
- */
- malloc_mutex_t mtx;
- /*
- * Guards expansion of eden. We separate this from the regular mutex so
- * that cheaper operations can still continue while we're doing the OS
- * call.
- */
- malloc_mutex_t grow_mtx;
- /*
-	 * Either NULL (if empty), or a hugepage-aligned region spanning an
-	 * integral number of hugepages.  We carve them off one at a
- * time to satisfy new pageslab requests.
- *
- * Guarded by grow_mtx.
- */
- void *eden;
- size_t eden_len;
- /* Source for metadata. */
- base_t *base;
- /* Number of grow operations done on this hpa_central_t. */
- uint64_t age_counter;
-
- /* The HPA hooks. */
- hpa_hooks_t hooks;
-};
-
-typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
-struct hpa_shard_nonderived_stats_s {
- /*
- * The number of times we've purged within a hugepage.
- *
- * Guarded by mtx.
- */
- uint64_t npurge_passes;
- /*
- * The number of individual purge calls we perform (which should always
- * be bigger than npurge_passes, since each pass purges at least one
-	 * extent within a hugepage).
- *
- * Guarded by mtx.
- */
- uint64_t npurges;
-
- /*
- * The number of times we've hugified a pageslab.
- *
- * Guarded by mtx.
- */
- uint64_t nhugifies;
- /*
- * The number of times we've dehugified a pageslab.
- *
- * Guarded by mtx.
- */
- uint64_t ndehugifies;
-};
-
-/* Completely derived; only used by CTL. */
-typedef struct hpa_shard_stats_s hpa_shard_stats_t;
-struct hpa_shard_stats_s {
- psset_stats_t psset_stats;
- hpa_shard_nonderived_stats_t nonderived_stats;
-};
-
-typedef struct hpa_shard_s hpa_shard_t;
-struct hpa_shard_s {
- /*
- * pai must be the first member; we cast from a pointer to it to a
- * pointer to the hpa_shard_t.
- */
- pai_t pai;
-
- /* The central allocator we get our hugepages from. */
- hpa_central_t *central;
- /* Protects most of this shard's state. */
- malloc_mutex_t mtx;
- /*
- * Guards the shard's access to the central allocator (preventing
- * multiple threads operating on this shard from accessing the central
- * allocator).
- */
- malloc_mutex_t grow_mtx;
- /* The base metadata allocator. */
- base_t *base;
-
- /*
- * This edata cache is the one we use when allocating a small extent
- * from a pageslab. The pageslab itself comes from the centralized
- * allocator, and so will use its edata_cache.
- */
- edata_cache_fast_t ecf;
-
- psset_t psset;
-
- /*
- * How many grow operations have occurred.
- *
- * Guarded by grow_mtx.
- */
- uint64_t age_counter;
-
- /* The arena ind we're associated with. */
- unsigned ind;
-
- /*
- * Our emap. This is just a cache of the emap pointer in the associated
- * hpa_central.
- */
- emap_t *emap;
-
- /* The configuration choices for this hpa shard. */
- hpa_shard_opts_t opts;
-
- /*
- * How many pages have we started but not yet finished purging in this
- * hpa shard.
- */
- size_t npending_purge;
-
- /*
- * Those stats which are copied directly into the CTL-centric hpa shard
- * stats.
- */
- hpa_shard_nonderived_stats_t stats;
-
- /*
- * Last time we performed purge on this shard.
- */
- nstime_t last_purge;
-};
-
-/*
- * Whether or not the HPA can be used given the current configuration. This is
- * not necessarily a guarantee that it backs its allocations by hugepages,
- * just that it can function properly given the system it's running on.
- */
-bool hpa_supported();
-bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
-bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
- base_t *base, edata_cache_t *edata_cache, unsigned ind,
- const hpa_shard_opts_t *opts);
-
-void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
-void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
- hpa_shard_stats_t *dst);
-
-/*
- * Notify the shard that we won't use it for allocations much longer. Due to
- * the possibility of races, we don't actually prevent allocations; just flush
- * and disable the embedded edata_cache_small.
- */
-void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
-void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
-
-void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
- bool deferral_allowed);
-void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
-
-/*
- * We share the fork ordering with the PA and arena prefork handling; that's why
- * these are 3 and 4 rather than 0 and 1.
- */
-void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
-void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
-void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
-void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
-
-#endif /* JEMALLOC_INTERNAL_HPA_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_hooks.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_hooks.h
deleted file mode 100644
index 4ea221cb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_hooks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
-#define JEMALLOC_INTERNAL_HPA_HOOKS_H
-
-typedef struct hpa_hooks_s hpa_hooks_t;
-struct hpa_hooks_s {
- void *(*map)(size_t size);
- void (*unmap)(void *ptr, size_t size);
- void (*purge)(void *ptr, size_t size);
- void (*hugify)(void *ptr, size_t size);
- void (*dehugify)(void *ptr, size_t size);
- void (*curtime)(nstime_t *r_time, bool first_reading);
- uint64_t (*ms_since)(nstime_t *r_time);
-};
-
-extern hpa_hooks_t hpa_hooks_default;
-
-#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_opts.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_opts.h
deleted file mode 100644
index ee84fea1..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpa_opts.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
-#define JEMALLOC_INTERNAL_HPA_OPTS_H
-
-#include "jemalloc/internal/fxp.h"
-
-/*
- * This file is morally part of hpa.h, but is split out for header-ordering
- * reasons.
- */
-
-typedef struct hpa_shard_opts_s hpa_shard_opts_t;
-struct hpa_shard_opts_s {
- /*
- * The largest size we'll allocate out of the shard. For those
- * allocations refused, the caller (in practice, the PA module) will
- * fall back to the more general (for now) PAC, which can always handle
- * any allocation request.
- */
- size_t slab_max_alloc;
-
- /*
- * When the number of active bytes in a hugepage is >=
- * hugification_threshold, we force hugify it.
- */
- size_t hugification_threshold;
-
- /*
-	 * The HPA purges whenever the number of dirty pages exceeds dirty_mult *
- * active_pages. This may be set to (fxp_t)-1 to disable purging.
- */
- fxp_t dirty_mult;
-
- /*
- * Whether or not the PAI methods are allowed to defer work to a
- * subsequent hpa_shard_do_deferred_work() call. Practically, this
- * corresponds to background threads being enabled. We track this
- * ourselves for encapsulation purposes.
- */
- bool deferral_allowed;
-
- /*
- * How long a hugepage has to be a hugification candidate before it will
- * actually get hugified.
- */
- uint64_t hugify_delay_ms;
-
- /*
- * Minimum amount of time between purges.
- */
- uint64_t min_purge_interval_ms;
-};
-
-#define HPA_SHARD_OPTS_DEFAULT { \
- /* slab_max_alloc */ \
- 64 * 1024, \
- /* hugification_threshold */ \
- HUGEPAGE * 95 / 100, \
- /* dirty_mult */ \
- FXP_INIT_PERCENT(25), \
- /* \
- * deferral_allowed \
- * \
- * Really, this is always set by the arena during creation \
- * or by an hpa_shard_set_deferral_allowed call, so the value \
- * we put here doesn't matter. \
- */ \
- false, \
- /* hugify_delay_ms */ \
- 10 * 1000, \
- /* min_purge_interval_ms */ \
- 5 * 1000 \
-}
-
-#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
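A sketch of tuning a shard's options starting from the defaults above (the specific values are arbitrary illustrations):

static void
hpa_opts_sketch(void) {
	hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
	opts.slab_max_alloc = 32 * 1024;	/* refuse HPA allocations above 32 KiB */
	opts.dirty_mult = FXP_INIT_PERCENT(10);	/* purge once dirty exceeds 10% of active */
	/* opts.dirty_mult = (fxp_t)-1; would disable purging entirely. */
	(void)opts;	/* would normally be passed to hpa_shard_init() */
}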
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpdata.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpdata.h
deleted file mode 100644
index 1fb534db..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/hpdata.h
+++ /dev/null
@@ -1,413 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_HPDATA_H
-#define JEMALLOC_INTERNAL_HPDATA_H
-
-#include "jemalloc/internal/fb.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/typed_list.h"
-
-/*
- * The metadata representation we use for extents in hugepages. While the PAC
- * uses the edata_t to represent both active and inactive extents, the HP only
- * uses the edata_t for active ones; instead, inactive extent state is tracked
- * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
- * region of virtual address space.
- *
- * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
- * an observable property of any given region of address space). It's just
- * hugepage-sized and hugepage-aligned; it's *potentially* huge.
- */
-typedef struct hpdata_s hpdata_t;
-ph_structs(hpdata_age_heap, hpdata_t);
-struct hpdata_s {
- /*
- * We likewise follow the edata convention of mangling names and forcing
- * the use of accessors -- this lets us add some consistency checks on
- * access.
- */
-
- /*
- * The address of the hugepage in question. This can't be named h_addr,
- * since that conflicts with a macro defined in Windows headers.
- */
- void *h_address;
- /* Its age (measured in psset operations). */
- uint64_t h_age;
- /* Whether or not we think the hugepage is mapped that way by the OS. */
- bool h_huge;
-
- /*
- * For some properties, we keep parallel sets of bools; h_foo_allowed
-	 * and h_in_psset_foo_container.  This is a decoupling mechanism that
-	 * keeps the hpa (which manages policies) separate from the psset
-	 * (which is the mechanism used to enforce those policies).  This allows
- * all the container management logic to live in one place, without the
- * HPA needing to know or care how that happens.
- */
-
- /*
- * Whether or not the hpdata is allowed to be used to serve allocations,
- * and whether or not the psset is currently tracking it as such.
- */
- bool h_alloc_allowed;
- bool h_in_psset_alloc_container;
-
- /*
- * The same, but with purging. There's no corresponding
- * h_in_psset_purge_container, because the psset (currently) always
- * removes hpdatas from their containers during updates (to implement
- * LRU for purging).
- */
- bool h_purge_allowed;
-
- /* And with hugifying. */
- bool h_hugify_allowed;
- /* When we became a hugification candidate. */
- nstime_t h_time_hugify_allowed;
- bool h_in_psset_hugify_container;
-
- /* Whether or not a purge or hugify is currently happening. */
- bool h_mid_purge;
- bool h_mid_hugify;
-
- /*
- * Whether or not the hpdata is being updated in the psset (i.e. if
- * there has been a psset_update_begin call issued without a matching
- * psset_update_end call). Eventually this will expand to other types
- * of updates.
- */
- bool h_updating;
-
- /* Whether or not the hpdata is in a psset. */
- bool h_in_psset;
-
- union {
- /* When nonempty (and also nonfull), used by the psset bins. */
- hpdata_age_heap_link_t age_link;
- /*
- * When empty (or not corresponding to any hugepage), list
- * linkage.
- */
- ql_elm(hpdata_t) ql_link_empty;
- };
-
- /*
- * Linkage for the psset to track candidates for purging and hugifying.
- */
- ql_elm(hpdata_t) ql_link_purge;
- ql_elm(hpdata_t) ql_link_hugify;
-
- /* The length of the largest contiguous sequence of inactive pages. */
- size_t h_longest_free_range;
-
- /* Number of active pages. */
- size_t h_nactive;
-
- /* A bitmap with bits set in the active pages. */
- fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
-
- /*
- * Number of dirty or active pages, and a bitmap tracking them. One
- * way to think of this is as which pages are dirty from the OS's
- * perspective.
- */
- size_t h_ntouched;
-
- /* The touched pages (using the same definition as above). */
- fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
-};
-
-TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
-TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
-TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
-
-ph_proto(, hpdata_age_heap, hpdata_t);
-
-static inline void *
-hpdata_addr_get(const hpdata_t *hpdata) {
- return hpdata->h_address;
-}
-
-static inline void
-hpdata_addr_set(hpdata_t *hpdata, void *addr) {
- assert(HUGEPAGE_ADDR2BASE(addr) == addr);
- hpdata->h_address = addr;
-}
-
-static inline uint64_t
-hpdata_age_get(const hpdata_t *hpdata) {
- return hpdata->h_age;
-}
-
-static inline void
-hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
- hpdata->h_age = age;
-}
-
-static inline bool
-hpdata_huge_get(const hpdata_t *hpdata) {
- return hpdata->h_huge;
-}
-
-static inline bool
-hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
- return hpdata->h_alloc_allowed;
-}
-
-static inline void
-hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
- hpdata->h_alloc_allowed = alloc_allowed;
-}
-
-static inline bool
-hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
- return hpdata->h_in_psset_alloc_container;
-}
-
-static inline void
-hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
- assert(in_container != hpdata->h_in_psset_alloc_container);
- hpdata->h_in_psset_alloc_container = in_container;
-}
-
-static inline bool
-hpdata_purge_allowed_get(const hpdata_t *hpdata) {
- return hpdata->h_purge_allowed;
-}
-
-static inline void
-hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
- assert(purge_allowed == false || !hpdata->h_mid_purge);
- hpdata->h_purge_allowed = purge_allowed;
-}
-
-static inline bool
-hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
- return hpdata->h_hugify_allowed;
-}
-
-static inline void
-hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
- assert(!hpdata->h_mid_hugify);
- hpdata->h_hugify_allowed = true;
- hpdata->h_time_hugify_allowed = now;
-}
-
-static inline nstime_t
-hpdata_time_hugify_allowed(hpdata_t *hpdata) {
- return hpdata->h_time_hugify_allowed;
-}
-
-static inline void
-hpdata_disallow_hugify(hpdata_t *hpdata) {
- hpdata->h_hugify_allowed = false;
-}
-
-static inline bool
-hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
- return hpdata->h_in_psset_hugify_container;
-}
-
-static inline void
-hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
- assert(in_container != hpdata->h_in_psset_hugify_container);
- hpdata->h_in_psset_hugify_container = in_container;
-}
-
-static inline bool
-hpdata_mid_purge_get(const hpdata_t *hpdata) {
- return hpdata->h_mid_purge;
-}
-
-static inline void
-hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
- assert(mid_purge != hpdata->h_mid_purge);
- hpdata->h_mid_purge = mid_purge;
-}
-
-static inline bool
-hpdata_mid_hugify_get(const hpdata_t *hpdata) {
- return hpdata->h_mid_hugify;
-}
-
-static inline void
-hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
- assert(mid_hugify != hpdata->h_mid_hugify);
- hpdata->h_mid_hugify = mid_hugify;
-}
-
-static inline bool
-hpdata_changing_state_get(const hpdata_t *hpdata) {
- return hpdata->h_mid_purge || hpdata->h_mid_hugify;
-}
-
-
-static inline bool
-hpdata_updating_get(const hpdata_t *hpdata) {
- return hpdata->h_updating;
-}
-
-static inline void
-hpdata_updating_set(hpdata_t *hpdata, bool updating) {
- assert(updating != hpdata->h_updating);
- hpdata->h_updating = updating;
-}
-
-static inline bool
-hpdata_in_psset_get(const hpdata_t *hpdata) {
- return hpdata->h_in_psset;
-}
-
-static inline void
-hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
- assert(in_psset != hpdata->h_in_psset);
- hpdata->h_in_psset = in_psset;
-}
-
-static inline size_t
-hpdata_longest_free_range_get(const hpdata_t *hpdata) {
- return hpdata->h_longest_free_range;
-}
-
-static inline void
-hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
- assert(longest_free_range <= HUGEPAGE_PAGES);
- hpdata->h_longest_free_range = longest_free_range;
-}
-
-static inline size_t
-hpdata_nactive_get(hpdata_t *hpdata) {
- return hpdata->h_nactive;
-}
-
-static inline size_t
-hpdata_ntouched_get(hpdata_t *hpdata) {
- return hpdata->h_ntouched;
-}
-
-static inline size_t
-hpdata_ndirty_get(hpdata_t *hpdata) {
- return hpdata->h_ntouched - hpdata->h_nactive;
-}
-
-static inline size_t
-hpdata_nretained_get(hpdata_t *hpdata) {
- return HUGEPAGE_PAGES - hpdata->h_ntouched;
-}
-
-static inline void
-hpdata_assert_empty(hpdata_t *hpdata) {
- assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
- assert(hpdata->h_nactive == 0);
-}
-
-/*
- * Only used in tests, and in hpdata_assert_consistent, below. Verifies some
- * consistency properties of the hpdata (e.g. that cached counts of page stats
- * match computed ones).
- */
-static inline bool
-hpdata_consistent(hpdata_t *hpdata) {
- if(fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
- != hpdata_longest_free_range_get(hpdata)) {
- return false;
- }
- if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
- != hpdata->h_nactive) {
- return false;
- }
- if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
- != hpdata->h_ntouched) {
- return false;
- }
- if (hpdata->h_ntouched < hpdata->h_nactive) {
- return false;
- }
- if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
- return false;
- }
- if (hpdata_changing_state_get(hpdata)
- && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
- return false;
- }
- if (hpdata_hugify_allowed_get(hpdata)
- != hpdata_in_psset_hugify_container_get(hpdata)) {
- return false;
- }
- return true;
-}
-
-static inline void
-hpdata_assert_consistent(hpdata_t *hpdata) {
- assert(hpdata_consistent(hpdata));
-}
-
-static inline bool
-hpdata_empty(hpdata_t *hpdata) {
- return hpdata->h_nactive == 0;
-}
-
-static inline bool
-hpdata_full(hpdata_t *hpdata) {
- return hpdata->h_nactive == HUGEPAGE_PAGES;
-}
-
-void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
-
-/*
- * Given an hpdata which can serve an allocation request, pick and reserve an
- * offset within that hpdata at which to place the allocation.
- */
-void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
-void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
-
-/*
- * The hpdata_purge_state_t allows grabbing the metadata required to purge
- * subranges of a hugepage while holding a lock, dropping the lock during the
- * actual purging, and then reacquiring it to update the metadata again.
- */
-typedef struct hpdata_purge_state_s hpdata_purge_state_t;
-struct hpdata_purge_state_s {
- size_t npurged;
- size_t ndirty_to_purge;
- fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
- size_t next_purge_search_begin;
-};
-
-/*
- * Initializes purge state. The access to hpdata must be externally
- * synchronized with other hpdata_* calls.
- *
- * You can tell whether or not a thread is purging or hugifying a given hpdata
- * via hpdata_changing_state_get(hpdata). Racing hugification or purging
- * operations aren't allowed.
- *
- * Once you begin purging, you have to follow through and call hpdata_purge_next
- * until you're done, and then end. Allocating out of an hpdata undergoing
- * purging is not allowed.
- *
- * Returns the number of dirty pages that will be purged.
- */
-size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
-
-/*
- * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
- * the address and size of the next range to purge, and returns true.
- * Otherwise, returns false to indicate that we're done.
- *
- * This requires exclusive access to the purge state, but *not* to the hpdata.
- * In particular, unreserve calls are allowed while purging (i.e. you can dalloc
- * into one part of the hpdata while purging a different part).
- */
-bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
- void **r_purge_addr, size_t *r_purge_size);
-/*
- * Updates the hpdata metadata after all purging is done. Needs external
- * synchronization.
- */
-void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
-
-void hpdata_hugify(hpdata_t *hpdata);
-void hpdata_dehugify(hpdata_t *hpdata);
-
-#endif /* JEMALLOC_INTERNAL_HPDATA_H */
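A sketch of the purge protocol documented above, with synchronization elided; os_purge() is a hypothetical stand-in for whatever actually purges the pages:

extern void os_purge(void *addr, size_t size);	/* hypothetical */

static void
hpdata_purge_sketch(hpdata_t *hpdata) {
	hpdata_purge_state_t purge_state;
	size_t ndirty = hpdata_purge_begin(hpdata, &purge_state);
	(void)ndirty;	/* number of dirty pages that will be purged */

	void *purge_addr;
	size_t purge_size;
	while (hpdata_purge_next(hpdata, &purge_state, &purge_addr,
	    &purge_size)) {
		os_purge(purge_addr, purge_size);
	}
	hpdata_purge_end(hpdata, &purge_state);
}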
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/inspect.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/inspect.h
deleted file mode 100644
index 65fef51d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/inspect.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_INSPECT_H
-#define JEMALLOC_INTERNAL_INSPECT_H
-
-/*
- * This module contains the heap introspection capabilities. For now they are
- * exposed purely through mallctl APIs in the experimental namespace, but this
- * may change over time.
- */
-
-/*
- * The following two structs are for experimental purposes. See
- * experimental_utilization_query_ctl and
- * experimental_utilization_batch_query_ctl in src/ctl.c.
- */
-typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
-struct inspect_extent_util_stats_s {
- size_t nfree;
- size_t nregs;
- size_t size;
-};
-
-typedef struct inspect_extent_util_stats_verbose_s
- inspect_extent_util_stats_verbose_t;
-
-struct inspect_extent_util_stats_verbose_s {
- void *slabcur_addr;
- size_t nfree;
- size_t nregs;
- size_t size;
- size_t bin_nfree;
- size_t bin_nregs;
-};
-
-void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size);
-void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
-
-#endif /* JEMALLOC_INTERNAL_INSPECT_H */
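
A rough sketch of the non-verbose query above, calling the internal symbol directly rather than going through the experimental mallctl namespace mentioned in the comment. It assumes the jemalloc-internal helpers tsdn_fetch() and malloc_printf() and only builds inside the jemalloc source tree.

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"
    #include "jemalloc/internal/inspect.h"

    /* Print how full the extent backing ptr is. */
    static void
    report_utilization(const void *ptr) {
        size_t nfree, nregs, size;
        inspect_extent_util_stats_get(tsdn_fetch(), ptr, &nfree, &nregs, &size);
        malloc_printf("extent: %zu bytes, %zu of %zu regions free\n",
            size, nfree, nregs);
    }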
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_decls.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_decls.h
deleted file mode 100644
index 983027c8..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_decls.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DECLS_H
-#define JEMALLOC_INTERNAL_DECLS_H
-
-#include <math.h>
-#ifdef _WIN32
-# include <windows.h>
-# include "msvc_compat/windows_extra.h"
-# include "msvc_compat/strings.h"
-# ifdef _WIN64
-# if LG_VADDR <= 32
-# error Generate the headers using x64 vcargs
-# endif
-# else
-# if LG_VADDR > 32
-# undef LG_VADDR
-# define LG_VADDR 32
-# endif
-# endif
-#else
-# include <sys/param.h>
-# include <sys/mman.h>
-# if !defined(__pnacl__) && !defined(__native_client__)
-# include <sys/syscall.h>
-# if !defined(SYS_write) && defined(__NR_write)
-# define SYS_write __NR_write
-# endif
-# if defined(SYS_open) && defined(__aarch64__)
- /* Android headers may define SYS_open to __NR_open even though
- * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
-# undef SYS_open
-# endif
-# include <sys/uio.h>
-# endif
-# include <pthread.h>
-# if defined(__FreeBSD__) || defined(__DragonFly__)
-# include <pthread_np.h>
-# include <sched.h>
-# if defined(__FreeBSD__)
-# define cpu_set_t cpuset_t
-# endif
-# endif
-# include <signal.h>
-# ifdef JEMALLOC_OS_UNFAIR_LOCK
-# include <os/lock.h>
-# endif
-# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-# include <sched.h>
-# endif
-# include <errno.h>
-# include <sys/time.h>
-# include <time.h>
-# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# include <mach/mach_time.h>
-# endif
-#endif
-#include <sys/types.h>
-
-#include <limits.h>
-#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
-#endif
-#ifndef SSIZE_MAX
-# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
-#endif
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stddef.h>
-#ifndef offsetof
-# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
-#endif
-#include <string.h>
-#include <strings.h>
-#include <ctype.h>
-#ifdef _MSC_VER
-# include <io.h>
-typedef intptr_t ssize_t;
-# define PATH_MAX 1024
-# define STDERR_FILENO 2
-# define __func__ __FUNCTION__
-# ifdef JEMALLOC_HAS_RESTRICT
-# define restrict __restrict
-# endif
-/* Disable warnings about deprecated system functions. */
-# pragma warning(disable: 4996)
-#if _MSC_VER < 1800
-static int
-isblank(int c) {
- return (c == '\t' || c == ' ');
-}
-#endif
-#else
-# include <unistd.h>
-#endif
-#include <fcntl.h>
-
-/*
- * The Win32 midl compiler has #define small char; we don't use midl, but
- * "small" is a nice identifier to have available when talking about size
- * classes.
- */
-#ifdef small
-# undef small
-#endif
-
-#endif /* JEMALLOC_INTERNAL_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_defs.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_defs.h.in
deleted file mode 100644
index 3588072f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ /dev/null
@@ -1,427 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_DEFS_H_
-#define JEMALLOC_INTERNAL_DEFS_H_
-/*
- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
- * public APIs to be prefixed. This makes it possible, with some care, to use
- * multiple allocators simultaneously.
- */
-#undef JEMALLOC_PREFIX
-#undef JEMALLOC_CPREFIX
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_FREE
-#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
-#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
-#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
-#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
-
-/*
- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
- * For shared libraries, symbol visibility mechanisms prevent these symbols
- * from being exported, but for static libraries, naming collisions are a real
- * possibility.
- */
-#undef JEMALLOC_PRIVATE_NAMESPACE
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#undef CPU_SPINWAIT
-/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
-#undef HAVE_CPU_SPINWAIT
-
-/*
- * Number of significant bits in virtual addresses. This may be less than the
- * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
- * bits are the same as bit 47.
- */
-#undef LG_VADDR
-
-/* Defined if C11 atomics are available. */
-#undef JEMALLOC_C11_ATOMICS
-
-/* Defined if GCC __atomic atomics are available. */
-#undef JEMALLOC_GCC_ATOMIC_ATOMICS
-/* and the 8-bit variant support. */
-#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
-
-/* Defined if GCC __sync atomics are available. */
-#undef JEMALLOC_GCC_SYNC_ATOMICS
-/* and the 8-bit variant support. */
-#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
-
-/*
- * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
-#undef JEMALLOC_HAVE_BUILTIN_CLZ
-
-/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
-#undef JEMALLOC_OS_UNFAIR_LOCK
-
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
-/*
- * Defined if secure_getenv(3) is available.
- */
-#undef JEMALLOC_HAVE_SECURE_GETENV
-
-/*
- * Defined if issetugid(2) is available.
- */
-#undef JEMALLOC_HAVE_ISSETUGID
-
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/* Defined if pthread_setname_np(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
-
-/* Defined if pthread_getname_np(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
-
-/* Defined if pthread_get_name_np(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-
-/*
- * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_REALTIME
-
-/*
- * Defined if _malloc_thread_cleanup() exists. At least in the case of
- * FreeBSD, pthread_key_create() allocates, which if used during malloc
- * bootstrapping will cause recursion into the pthreads library. Therefore, if
- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
- * malloc_tsd.
- */
-#undef JEMALLOC_MALLOC_THREAD_CLEANUP
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#undef JEMALLOC_THREADED_INIT
-
-/*
- * Defined if the pthreads implementation defines
- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
- * to avoid recursive allocation during mutex initialization.
- */
-#undef JEMALLOC_MUTEX_INIT_CB
-
-/* Non-empty if the tls_model attribute is supported. */
-#undef JEMALLOC_TLS_MODEL
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-#undef JEMALLOC_DEBUG
-
-/* JEMALLOC_STATS enables statistics calculation. */
-#undef JEMALLOC_STATS
-
-/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
-#undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/* Use libgcc for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBGCC
-
-/* Use gcc intrinsics for profile backtracing if defined. */
-#undef JEMALLOC_PROF_GCC
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/* Support memory filling (junk/zero). */
-#undef JEMALLOC_FILL
-
-/* Support utrace(2)-based tracing. */
-#undef JEMALLOC_UTRACE
-
-/* Support utrace(2)-based tracing (label based signature). */
-#undef JEMALLOC_UTRACE_LABEL
-
-/* Support optional abort() on OOM. */
-#undef JEMALLOC_XMALLOC
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-#undef JEMALLOC_LAZY_LOCK
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#undef LG_QUANTUM
-
-/* One page is 2^LG_PAGE bytes. */
-#undef LG_PAGE
-
-/* Maximum number of regions in a slab. */
-#undef CONFIG_LG_SLAB_MAXREGS
-
-/*
- * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
- * system does not explicitly support huge pages; system calls that require
- * explicit huge page support are separately configured.
- */
-#undef LG_HUGEPAGE
-
-/*
- * If defined, adjacent virtual memory mappings with identical attributes
- * automatically coalesce, and they fragment when changes are made to subranges.
- * This is the normal order of things for mmap()/munmap(), but on Windows
- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
- * mappings do *not* coalesce/fragment.
- */
-#undef JEMALLOC_MAPS_COALESCE
-
-/*
- * If defined, retain memory for later reuse by default rather than using e.g.
- * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
- * common sequences of mmap()/munmap() calls will cause virtual memory map
- * holes.
- */
-#undef JEMALLOC_RETAIN
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef JEMALLOC_TLS
-
-/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
- */
-#undef JEMALLOC_INTERNAL_UNREACHABLE
-
-/*
- * ffs*() functions to use for bitmapping. Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#undef JEMALLOC_INTERNAL_FFSLL
-#undef JEMALLOC_INTERNAL_FFSL
-#undef JEMALLOC_INTERNAL_FFS
-
-/*
- * popcount*() functions to use for bitmapping.
- */
-#undef JEMALLOC_INTERNAL_POPCOUNTL
-#undef JEMALLOC_INTERNAL_POPCOUNT
-
-/*
- * If defined, explicitly attempt to more uniformly distribute large allocation
- * pointer alignments across all cache indices.
- */
-#undef JEMALLOC_CACHE_OBLIVIOUS
-
-/*
- * If defined, enable logging facilities. We make this a configure option to
- * avoid taking extra branches everywhere.
- */
-#undef JEMALLOC_LOG
-
-/*
- * If defined, use readlinkat() (instead of readlink()) to follow
- * /etc/malloc_conf.
- */
-#undef JEMALLOC_READLINKAT
-
-/*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-#undef JEMALLOC_ZONE
-
-/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm/overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_HAVE_MADVISE_HUGE
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
- * defined, this immediately discards pages,
- * such that new pages will be demand-zeroed if
- * the address region is later touched;
- * otherwise this behaves similarly to
- * MADV_FREE, though typically with higher
- * system overhead.
- */
-#undef JEMALLOC_PURGE_MADVISE_FREE
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
-
-/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
-#undef JEMALLOC_DEFINE_MADVISE_FREE
-
-/*
- * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
- */
-#undef JEMALLOC_MADVISE_DONTDUMP
-
-/*
- * Defined if MADV_[NO]CORE is supported as an argument to madvise.
- */
-#undef JEMALLOC_MADVISE_NOCORE
-
-/* Defined if mprotect(2) is available. */
-#undef JEMALLOC_HAVE_MPROTECT
-
-/*
- * Defined if transparent huge pages (THPs) are supported via the
- * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
- */
-#undef JEMALLOC_THP
-
-/* Defined if posix_madvise is available. */
-#undef JEMALLOC_HAVE_POSIX_MADVISE
-
-/*
- * Method for purging unused pages using posix_madvise.
- *
- * posix_madvise(..., POSIX_MADV_DONTNEED)
- */
-#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
-#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
-
-/*
- * Defined if memcntl page admin call is supported
- */
-#undef JEMALLOC_HAVE_MEMCNTL
-
-/*
- * Defined if malloc_size is supported
- */
-#undef JEMALLOC_HAVE_MALLOC_SIZE
-
-/* Define if operating system has alloca.h header. */
-#undef JEMALLOC_HAS_ALLOCA_H
-
-/* C99 restrict keyword supported. */
-#undef JEMALLOC_HAS_RESTRICT
-
-/* For use by hash code. */
-#undef JEMALLOC_BIG_ENDIAN
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#undef LG_SIZEOF_INT
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#undef LG_SIZEOF_LONG
-
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#undef LG_SIZEOF_LONG_LONG
-
-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
-#undef LG_SIZEOF_INTMAX_T
-
-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
-#undef JEMALLOC_GLIBC_MALLOC_HOOK
-
-/* glibc memalign hook. */
-#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
-
-/* pthread support */
-#undef JEMALLOC_HAVE_PTHREAD
-
-/* dlsym() support */
-#undef JEMALLOC_HAVE_DLSYM
-
-/* Adaptive mutex support in pthreads. */
-#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
-
-/* GNU specific sched_getcpu support */
-#undef JEMALLOC_HAVE_SCHED_GETCPU
-
-/* GNU specific sched_setaffinity support */
-#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
-
-/*
- * If defined, all the features necessary for background threads are present.
- */
-#undef JEMALLOC_BACKGROUND_THREAD
-
-/*
- * If defined, jemalloc symbols are not exported (doesn't work when
- * JEMALLOC_PREFIX is not defined).
- */
-#undef JEMALLOC_EXPORT
-
-/* config.malloc_conf options string. */
-#undef JEMALLOC_CONFIG_MALLOC_CONF
-
-/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
-#undef JEMALLOC_IS_MALLOC
-
-/*
- * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
- */
-#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
-
-/* Performs additional safety checks when defined. */
-#undef JEMALLOC_OPT_SAFETY_CHECKS
-
-/* Is C++ support being built? */
-#undef JEMALLOC_ENABLE_CXX
-
-/* Performs additional size checks when defined. */
-#undef JEMALLOC_OPT_SIZE_CHECKS
-
-/* Allows sampled junk and stash for checking use-after-free when defined. */
-#undef JEMALLOC_UAF_DETECTION
-
-/* Darwin VM_MAKE_TAG support */
-#undef JEMALLOC_HAVE_VM_MAKE_TAG
-
-/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
-#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
-
-#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
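
Configure rewrites each #undef in this template into a concrete #define (or leaves it out) when generating jemalloc_internal_defs.h. The excerpt below is a hypothetical illustration of what a typical x86-64 Linux build might produce; the exact set and values depend on the platform and configure flags.

    /* Illustrative values only; not taken from any particular build. */
    #define LG_VADDR 48
    #define JEMALLOC_C11_ATOMICS
    #define JEMALLOC_GCC_ATOMIC_ATOMICS
    #define JEMALLOC_HAVE_BUILTIN_CLZ
    #define JEMALLOC_USE_SYSCALL
    #define LG_QUANTUM 4
    #define LG_PAGE 12
    #define LG_HUGEPAGE 21
    #define JEMALLOC_MAPS_COALESCE
    #define JEMALLOC_RETAIN
    #define JEMALLOC_TLS
    #define JEMALLOC_HAVE_MADVISE
    #define JEMALLOC_HAVE_MADVISE_HUGE
    #define JEMALLOC_PURGE_MADVISE_FREE
    #define JEMALLOC_PURGE_MADVISE_DONTNEED
    #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
    #define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
    #define LG_SIZEOF_INT 2
    #define LG_SIZEOF_LONG 3
    #define LG_SIZEOF_LONG_LONG 3
    #define LG_SIZEOF_INTMAX_T 3
    #define JEMALLOC_IS_MALLOC 1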
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_externs.h
deleted file mode 100644
index fc834c67..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_externs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTERNS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/hpa_opts.h"
-#include "jemalloc/internal/sec_opts.h"
-#include "jemalloc/internal/tsd_types.h"
-#include "jemalloc/internal/nstime.h"
-
-/* TSD checks this to set thread local slow state accordingly. */
-extern bool malloc_slow;
-
-/* Run-time options. */
-extern bool opt_abort;
-extern bool opt_abort_conf;
-extern bool opt_trust_madvise;
-extern bool opt_confirm_conf;
-extern bool opt_hpa;
-extern hpa_shard_opts_t opt_hpa_opts;
-extern sec_opts_t opt_hpa_sec_opts;
-
-extern const char *opt_junk;
-extern bool opt_junk_alloc;
-extern bool opt_junk_free;
-extern void (*junk_free_callback)(void *ptr, size_t size);
-extern void (*junk_alloc_callback)(void *ptr, size_t size);
-extern bool opt_utrace;
-extern bool opt_xmalloc;
-extern bool opt_experimental_infallible_new;
-extern bool opt_zero;
-extern unsigned opt_narenas;
-extern zero_realloc_action_t opt_zero_realloc_action;
-extern malloc_init_t malloc_init_state;
-extern const char *zero_realloc_mode_names[];
-extern atomic_zu_t zero_realloc_count;
-extern bool opt_cache_oblivious;
-
-/* Escape the free fastpath when ptr & mask == 0 (for sanitization purposes). */
-extern uintptr_t san_cache_bin_nonfast_mask;
-
-/* Number of CPUs. */
-extern unsigned ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned narenas_auto;
-
-/* Base index for manual arenas. */
-extern unsigned manual_arena_base;
-
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern atomic_p_t arenas[];
-
-void *a0malloc(size_t size);
-void a0dalloc(void *ptr);
-void *bootstrap_malloc(size_t size);
-void *bootstrap_calloc(size_t num, size_t size);
-void bootstrap_free(void *ptr);
-void arena_set(unsigned ind, arena_t *arena);
-unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
-void iarena_cleanup(tsd_t *tsd);
-void arena_cleanup(tsd_t *tsd);
-size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
-void jemalloc_prefork(void);
-void jemalloc_postfork_parent(void);
-void jemalloc_postfork_child(void);
-void je_sdallocx_noflags(void *ptr, size_t size);
-void *malloc_default(size_t size);
-
-#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_includes.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_includes.h
deleted file mode 100644
index 751c112f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_includes.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_INCLUDES_H
-#define JEMALLOC_INTERNAL_INCLUDES_H
-
-/*
- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
- * but there are circular dependencies that cannot be broken without
- * substantial performance degradation.
- *
- * Historically, we dealt with this by splitting each header into four sections
- * (types, structs, externs, and inlines) and including each header file
- * multiple times in this file, picking out the portion we want on each pass
- * using the following #defines:
- * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
- * types.
- * JEMALLOC_H_STRUCTS : Data structures.
- * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
- * JEMALLOC_H_INLINES : Inline functions.
- *
- * We're moving toward a world in which the dependencies are explicit; each file
- * will #include the headers it depends on (rather than relying on them being
- * implicitly available via this file including every header file in the
- * project).
- *
- * We're now in an intermediate state: we've broken up the header files to avoid
- * having to include each one multiple times, but have not yet moved the
- * dependency information into the header files (i.e. we still rely on the
- * ordering in this file to ensure all a header's dependencies are available in
- * its translation unit). Each component is now broken up into multiple header
- * files, corresponding to the sections above (e.g. instead of "foo.h", we now
- * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
- *
- * Those files which have been converted to explicitly include their
- * inter-component dependencies are now in the initial HERMETIC HEADERS
- * section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
- * must be included first in every translation unit) for system headers and
- * global jemalloc definitions, however.
- */
-
-/******************************************************************************/
-/* TYPES */
-/******************************************************************************/
-
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/prof_types.h"
-
-/******************************************************************************/
-/* STRUCTS */
-/******************************************************************************/
-
-#include "jemalloc/internal/prof_structs.h"
-#include "jemalloc/internal/arena_structs.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/background_thread_structs.h"
-
-/******************************************************************************/
-/* EXTERNS */
-/******************************************************************************/
-
-#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/arena_externs.h"
-#include "jemalloc/internal/large_externs.h"
-#include "jemalloc/internal/tcache_externs.h"
-#include "jemalloc/internal/prof_externs.h"
-#include "jemalloc/internal/background_thread_externs.h"
-
-/******************************************************************************/
-/* INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
-/*
- * Include portions of arena code interleaved with tcache code in order to
- * resolve circular dependencies.
- */
-#include "jemalloc/internal/arena_inlines_a.h"
-#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
-#include "jemalloc/internal/tcache_inlines.h"
-#include "jemalloc/internal/arena_inlines_b.h"
-#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
-#include "jemalloc/internal/prof_inlines.h"
-#include "jemalloc/internal/background_thread_inlines.h"
-
-#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
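
In practice, each translation unit in the library follows the convention described above: the preamble comes first, then this aggregate header, then any hermetic component headers. A representative sketch of how a src/*.c file starts:

    #include "jemalloc/internal/jemalloc_preamble.h"
    #include "jemalloc/internal/jemalloc_internal_includes.h"

    /* Hermetic headers pull in their own dependencies explicitly. */
    #include "jemalloc/internal/assert.h"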
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_a.h
deleted file mode 100644
index 9e27cc30..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_a.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_A_H
-#define JEMALLOC_INTERNAL_INLINES_A_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/ticker.h"
-
-JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
-malloc_getcpu(void) {
- assert(have_percpu_arena);
-#if defined(_WIN32)
- return GetCurrentProcessorNumber();
-#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
- return (malloc_cpuid_t)sched_getcpu();
-#else
- not_reached();
- return -1;
-#endif
-}
-
-/* Return the chosen arena index based on current cpu. */
-JEMALLOC_ALWAYS_INLINE unsigned
-percpu_arena_choose(void) {
- assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
-
- malloc_cpuid_t cpuid = malloc_getcpu();
- assert(cpuid >= 0);
-
- unsigned arena_ind;
- if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
- 2)) {
- arena_ind = cpuid;
- } else {
- assert(opt_percpu_arena == per_phycpu_arena);
- /* Hyper threads on the same physical CPU share arena. */
- arena_ind = cpuid - ncpus / 2;
- }
-
- return arena_ind;
-}
-
-/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
-JEMALLOC_ALWAYS_INLINE unsigned
-percpu_arena_ind_limit(percpu_arena_mode_t mode) {
- assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
- if (mode == per_phycpu_arena && ncpus > 1) {
- if (ncpus % 2) {
- /* This likely means a misconfig. */
- return ncpus / 2 + 1;
- }
- return ncpus / 2;
- } else {
- return ncpus;
- }
-}
-
-static inline arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
- arena_t *ret;
-
- assert(ind < MALLOCX_ARENA_LIMIT);
-
- ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
- if (unlikely(ret == NULL)) {
- if (init_if_missing) {
- ret = arena_init(tsdn, ind, &arena_config_default);
- }
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tcache_available(tsd_t *tsd) {
- /*
- * Thread specific auto tcache might be unavailable if: 1) tcache
- * initialization is in progress, or 2) it was disabled through the
- * thread.tcache.enabled mallctl or config options. This check covers all cases.
- */
- if (likely(tsd_tcache_enabled_get(tsd))) {
- /* Associated arena == NULL implies tcache init in progress. */
- if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
- tcache_assert_initialized(tsd_tcachep_get(tsd));
- }
- return true;
- }
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd) {
- if (!tcache_available(tsd)) {
- return NULL;
- }
-
- return tsd_tcachep_get(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_slow_t *
-tcache_slow_get(tsd_t *tsd) {
- if (!tcache_available(tsd)) {
- return NULL;
- }
-
- return tsd_tcache_slowp_get(tsd);
-}
-
-static inline void
-pre_reentrancy(tsd_t *tsd, arena_t *arena) {
- /* arena is the current context. Reentry from a0 is not allowed. */
- assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
- tsd_pre_reentrancy_raw(tsd);
-}
-
-static inline void
-post_reentrancy(tsd_t *tsd) {
- tsd_post_reentrancy_raw(tsd);
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
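
A standalone sketch of the index mapping that percpu_arena_choose() implements in per-physical-CPU mode; the helper function and the numbers are illustrative only. With ncpus = 8, hyperthread siblings 4-7 map back onto arenas 0-3.

    #include <stdio.h>

    /* Mirrors the per_phycpu_arena branch of percpu_arena_choose(). */
    static unsigned
    phycpu_arena_index(unsigned cpuid, unsigned ncpus) {
        /* Hyperthreads on the same physical CPU share an arena. */
        return (cpuid < ncpus / 2) ? cpuid : cpuid - ncpus / 2;
    }

    int
    main(void) {
        unsigned ncpus = 8;
        for (unsigned cpu = 0; cpu < ncpus; cpu++) {
            printf("cpu %u -> arena %u\n", cpu,
                phycpu_arena_index(cpu, ncpus));
        }
        return 0;
    }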
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_b.h
deleted file mode 100644
index 152f8a03..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_b.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_B_H
-#define JEMALLOC_INTERNAL_INLINES_B_H
-
-#include "jemalloc/internal/extent.h"
-
-static inline void
-percpu_arena_update(tsd_t *tsd, unsigned cpu) {
- assert(have_percpu_arena);
- arena_t *oldarena = tsd_arena_get(tsd);
- assert(oldarena != NULL);
- unsigned oldind = arena_ind_get(oldarena);
-
- if (oldind != cpu) {
- unsigned newind = cpu;
- arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
- assert(newarena != NULL);
-
- /* Set new arena/tcache associations. */
- arena_migrate(tsd, oldarena, newarena);
- tcache_t *tcache = tcache_get(tsd);
- if (tcache != NULL) {
- tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
- tcache, newarena);
- }
- }
-}
-
-
-/* Choose an arena based on a per-thread value. */
-static inline arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
- arena_t *ret;
-
- if (arena != NULL) {
- return arena;
- }
-
- /* During reentrancy, arena 0 is the safest bet. */
- if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
- return arena_get(tsd_tsdn(tsd), 0, true);
- }
-
- ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL)) {
- ret = arena_choose_hard(tsd, internal);
- assert(ret);
- if (tcache_available(tsd)) {
- tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
- tcache_t *tcache = tsd_tcachep_get(tsd);
- if (tcache_slow->arena != NULL) {
- /* See comments in tsd_tcache_data_init(). */
- assert(tcache_slow->arena ==
- arena_get(tsd_tsdn(tsd), 0, false));
- if (tcache_slow->arena != ret) {
- tcache_arena_reassociate(tsd_tsdn(tsd),
- tcache_slow, tcache, ret);
- }
- } else {
- tcache_arena_associate(tsd_tsdn(tsd),
- tcache_slow, tcache, ret);
- }
- }
- }
-
- /*
- * Note that for percpu arena, if the current arena is outside of the
- * auto percpu arena range (i.e. the thread is assigned to a manually
- * managed arena), then percpu arena is skipped.
- */
- if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
- !internal && (arena_ind_get(ret) <
- percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
- tsd_tsdn(tsd))) {
- unsigned ind = percpu_arena_choose();
- if (arena_ind_get(ret) != ind) {
- percpu_arena_update(tsd, ind);
- ret = tsd_arena_get(tsd);
- }
- ret->last_thd = tsd_tsdn(tsd);
- }
-
- return ret;
-}
-
-static inline arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena) {
- return arena_choose_impl(tsd, arena, false);
-}
-
-static inline arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena) {
- return arena_choose_impl(tsd, arena, true);
-}
-
-static inline bool
-arena_is_auto(arena_t *arena) {
- assert(narenas_auto > 0);
-
- return (arena_ind_get(arena) < manual_arena_base);
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_c.h
deleted file mode 100644
index b0868b7d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ /dev/null
@@ -1,340 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_INLINES_C_H
-#define JEMALLOC_INTERNAL_INLINES_C_H
-
-#include "jemalloc/internal/hook.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/log.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/thread_event.h"
-#include "jemalloc/internal/witness.h"
-
-/*
- * Translating the names of the 'i' functions:
- * Abbreviations used in the first part of the function name (before
- * alloc/dalloc) describe what that function accomplishes:
- * a: arena (query)
- * s: size (query, or sized deallocation)
- * e: extent (query)
- * p: aligned (allocates)
- * vs: size (query, without knowing that the pointer is into the heap)
- * r: rallocx implementation
- * x: xallocx implementation
- * Abbreviations used in the second part of the function name (after
- * alloc/dalloc) describe the arguments it takes
- * z: whether to return zeroed memory
- * t: accepts a tcache_t * parameter
- * m: accepts an arena_t * parameter
- */
-
-JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
-
- return arena_aalloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr) {
- assert(ptr != NULL);
-
- return arena_salloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_internal, arena_t *arena, bool slow_path) {
- void *ret;
-
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena == NULL || arena_is_auto(arena));
- if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- }
-
- ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
- if (config_stats && is_internal && likely(ret != NULL)) {
- arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
- return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
- NULL, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, bool is_internal, arena_t *arena) {
- void *ret;
-
- assert(usize != 0);
- assert(usize == sz_sa2u(usize, alignment));
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena == NULL || arena_is_auto(arena));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
- assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
- if (config_stats && is_internal && likely(ret != NULL)) {
- arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena) {
- return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
- return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd), false, NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr) {
- return arena_vsalloc(tsdn, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
- assert(ptr != NULL);
- assert(!is_internal || tcache == NULL);
- assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- if (config_stats && is_internal) {
- arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
- }
- if (!is_internal && !tsdn_null(tsdn) &&
- tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
- assert(tcache == NULL);
- }
- arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, void *ptr) {
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
- hook_ralloc_args_t *hook_args) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
- void *p;
- size_t usize, copysize;
-
- usize = sz_sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
- return NULL;
- }
- p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
- if (p == NULL) {
- return NULL;
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(p, ptr, copysize);
- hook_invoke_alloc(hook_args->is_realloc
- ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
- hook_args->args);
- hook_invoke_dalloc(hook_args->is_realloc
- ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
- isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
- return p;
-}
-
-/*
- * is_realloc threads through the knowledge of whether or not this call comes
- * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
- * correct entry point into any hooks.
- * Note that these functions are all force-inlined, so no actual bool gets
- * passed-around anywhere.
- */
-JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
-{
- assert(ptr != NULL);
- assert(size != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /*
- * Existing object alignment is inadequate; allocate new space
- * and copy.
- */
- return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
- zero, tcache, arena, hook_args);
- }
-
- return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
- tcache, hook_args);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
- bool zero, hook_ralloc_args_t *hook_args) {
- return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
- tcache_get(tsd), NULL, hook_args);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, size_t *newsize) {
- assert(ptr != NULL);
- assert(size != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
- != 0) {
- /* Existing object alignment is inadequate. */
- *newsize = oldsize;
- return true;
- }
-
- return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
- newsize);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
- cache_bin_t *bin, void *ret) {
- thread_allocated_set(tsd, allocated_after);
- if (config_stats) {
- bin->tstats.nrequests++;
- }
-
- LOG("core.malloc.exit", "result: %p", ret);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-malloc_initialized(void) {
- return (malloc_init_state == malloc_init_initialized);
-}
-
-/*
- * malloc() fastpath. Included here so that we can inline it into operator new;
- * function call overhead there is non-negligible as a fraction of total CPU in
- * allocation-heavy C++ programs. We take the fallback alloc to allow malloc
- * (which can return NULL) to differ in its behavior from operator new (which
- * can't). It matches the signature of malloc / operator new so that we can
- * tail-call the fallback allocator, allowing us to avoid setting up the call
- * frame in the common case.
- *
- * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
- * tcache. If either of these is false, we tail-call to the slowpath,
- * malloc_default(). Tail-calling is used to avoid any caller-saved
- * registers.
- *
- * fastpath supports ticker and profiling, both of which will also
- * tail-call to the slowpath if they fire.
- */
-JEMALLOC_ALWAYS_INLINE void *
-imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
- LOG("core.malloc.entry", "size: %zu", size);
- if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
- return fallback_alloc(size);
- }
-
- tsd_t *tsd = tsd_get(false);
- if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
- return fallback_alloc(size);
- }
- /*
- * The code below, up to the branch checking the next_event threshold, may
- * execute before malloc_init(), in which case the threshold is 0 in order
- * to trigger the slow path and initialization.
- *
- * Note that when uninitialized, only the fast-path variants of the sz /
- * tsd facilities may be called.
- */
- szind_t ind;
- /*
- * The thread_allocated counter in tsd serves as a general purpose
- * accumulator for bytes of allocation to trigger different types of
- * events. usize is always needed to advance thread_allocated, though
- * it's not always needed in the core allocation logic.
- */
- size_t usize;
- sz_size2index_usize_fastpath(size, &ind, &usize);
- /* Fast path relies on size being a bin. */
- assert(ind < SC_NBINS);
- assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
- (size <= SC_SMALL_MAXCLASS));
-
- uint64_t allocated, threshold;
- te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
- uint64_t allocated_after = allocated + usize;
- /*
- * The ind and usize might be uninitialized (or partially) before
- * malloc_init(). The assertions check for: 1) full correctness (usize
- * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
- * when !initialized.
- */
- if (!malloc_initialized()) {
- assert(threshold == 0);
- } else {
- assert(ind == sz_size2index(size));
- assert(usize > 0 && usize == sz_index2size(ind));
- }
- /*
- * Check for events and tsd non-nominal (fast_threshold will be set to
- * 0) in a single branch.
- */
- if (unlikely(allocated_after >= threshold)) {
- return fallback_alloc(size);
- }
- assert(tsd_fast(tsd));
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- cache_bin_t *bin = &tcache->bins[ind];
- bool tcache_success;
- void *ret;
-
- /*
- * We split up the code this way so that redundant low-water
- * computation doesn't happen on the (more common) case in which we
- * don't touch the low water mark. The compiler won't do this
- * duplication on its own.
- */
- ret = cache_bin_alloc_easy(bin, &tcache_success);
- if (tcache_success) {
- fastpath_success_finish(tsd, allocated_after, bin, ret);
- return ret;
- }
- ret = cache_bin_alloc(bin, &tcache_success);
- if (tcache_success) {
- fastpath_success_finish(tsd, allocated_after, bin, ret);
- return ret;
- }
-
- return fallback_alloc(size);
-}
-
-#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
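
For context, the public allocation entry point is expected to be little more than a tail call into imalloc_fastpath(), along the lines of the sketch below (the general shape of the wrapper in src/jemalloc.c, stripped of export/nothrow attributes; not a verbatim copy).

    void *
    je_malloc(size_t size) {
        return imalloc_fastpath(size, &malloc_default);
    }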
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_macros.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_macros.h
deleted file mode 100644
index e97b5f90..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_macros.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MACROS_H
-#define JEMALLOC_INTERNAL_MACROS_H
-
-#ifdef JEMALLOC_DEBUG
-# define JEMALLOC_ALWAYS_INLINE static inline
-#else
-# ifdef _MSC_VER
-# define JEMALLOC_ALWAYS_INLINE static __forceinline
-# else
-# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
-# endif
-#endif
-#ifdef _MSC_VER
-# define inline _inline
-#endif
-
-#define UNUSED JEMALLOC_ATTR(unused)
-
-#define ZU(z) ((size_t)z)
-#define ZD(z) ((ssize_t)z)
-#define QU(q) ((uint64_t)q)
-#define QD(q) ((int64_t)q)
-
-#define KZU(z) ZU(z##ULL)
-#define KZD(z) ZD(z##LL)
-#define KQU(q) QU(q##ULL)
-#define KQD(q) QD(q##LL)
-
-#ifndef __DECONST
-# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-#endif
-
-#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
-# define restrict
-#endif
-
-/* Various function pointers are static and immutable except during testing. */
-#ifdef JEMALLOC_JET
-# define JET_MUTABLE
-#else
-# define JET_MUTABLE const
-#endif
-
-#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
-#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
-
-/* Diagnostic suppression macros */
-#if defined(_MSC_VER) && !defined(__clang__)
-# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
-# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-/* #pragma GCC diagnostic first appeared in gcc 4.6. */
-#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
- (__GNUC_MINOR__ > 5)))) || defined(__clang__)
-/*
- * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
- * diagnostic suppression macros and should not be used anywhere else.
- */
-# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
-# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
-# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
- JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
-
-/*
- * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
- * all clang versions up to version 7 (currently trunk, unreleased). This macro
- * suppresses the warning for the affected compiler versions only.
- */
-# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
- defined(__clang__)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
-# else
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# endif
-
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
-# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
-# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
- JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
-# else
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# endif
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
- JEMALLOC_DIAGNOSTIC_PUSH \
- JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
-#else
-# define JEMALLOC_DIAGNOSTIC_PUSH
-# define JEMALLOC_DIAGNOSTIC_POP
-# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
-# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
-# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
-# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-#endif
-
-/*
- * Disables spurious diagnostics for all headers. Since these headers are not
- * included by users directly, it does not affect their diagnostic settings.
- */
-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-
-#endif /* JEMALLOC_INTERNAL_MACROS_H */
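
A brief usage sketch of the suppression macros above, wrapping a partial zero-initializer that older GCC/Clang releases flag with -Wmissing-field-initializers; the struct here is hypothetical.

    typedef struct {
        int a;
        int b;
    } example_pair_t;

    JEMALLOC_DIAGNOSTIC_PUSH
    JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
    static const example_pair_t example_pair_zero = {0};
    JEMALLOC_DIAGNOSTIC_POP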
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_types.h
deleted file mode 100644
index 62c2b59c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_internal_types.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TYPES_H
-#define JEMALLOC_INTERNAL_TYPES_H
-
-#include "jemalloc/internal/quantum.h"
-
-/* Processor / core id type. */
-typedef int malloc_cpuid_t;
-
-/* When realloc(non-null-ptr, 0) is called, what happens? */
-enum zero_realloc_action_e {
- /* Realloc(ptr, 0) is free(ptr); return malloc(0); */
- zero_realloc_action_alloc = 0,
- /* Realloc(ptr, 0) is free(ptr); */
- zero_realloc_action_free = 1,
- /* Realloc(ptr, 0) aborts. */
- zero_realloc_action_abort = 2
-};
-typedef enum zero_realloc_action_e zero_realloc_action_t;
-
-/* Signature of write callback. */
-typedef void (write_cb_t)(void *, const char *);
-
-enum malloc_init_e {
- malloc_init_uninitialized = 3,
- malloc_init_a0_initialized = 2,
- malloc_init_recursible = 1,
- malloc_init_initialized = 0 /* Common case --> jnz. */
-};
-typedef enum malloc_init_e malloc_init_t;
-
-/*
- * Flags bits:
- *
- * a: arena
- * t: tcache
- * 0: unused
- * z: zero
- * n: alignment
- *
- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
- */
-#define MALLOCX_ARENA_BITS 12
-#define MALLOCX_TCACHE_BITS 12
-#define MALLOCX_LG_ALIGN_BITS 6
-#define MALLOCX_ARENA_SHIFT 20
-#define MALLOCX_TCACHE_SHIFT 8
-#define MALLOCX_ARENA_MASK \
- (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
-/* NB: Arena index bias decreases the maximum number of arenas by 1. */
-#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1)
-#define MALLOCX_TCACHE_MASK \
- (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
-#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3)
-#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1)
-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
-#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
- (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
-#define MALLOCX_ALIGN_GET(flags) \
- (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
-#define MALLOCX_ZERO_GET(flags) \
- ((bool)(flags & MALLOCX_ZERO))
-
-#define MALLOCX_TCACHE_GET(flags) \
- (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
-#define MALLOCX_ARENA_GET(flags) \
- (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
-
-/* Smallest size class to support. */
-#define TINY_MIN (1U << LG_TINY_MIN)
-
-#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
-#define LONG_MASK (LONG - 1)
-
-/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
- (((a) + LONG_MASK) & ~LONG_MASK)
-
-#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
-#define PTR_MASK (SIZEOF_PTR - 1)
-
-/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
- (((a) + PTR_MASK) & ~PTR_MASK)
-
-/*
- * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
- * In addition, this controls the spacing of cacheline-spaced size classes.
- *
- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
- * only handle raw constants.
- */
-#define LG_CACHELINE 6
-#define CACHELINE 64
-#define CACHELINE_MASK (CACHELINE - 1)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
- (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-
-/* Return the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2BASE(a, alignment) \
- ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
-
-/* Return the offset between a and the nearest aligned address at or below a. */
-#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
- ((size_t)((uintptr_t)(a) & (alignment - 1)))
-
-/* Return the smallest alignment multiple that is >= s. */
-#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & ((~(alignment)) + 1))
-
-/* Declare a variable-length array. */
-#if __STDC_VERSION__ < 199901L
-# ifdef _MSC_VER
-# include <malloc.h>
-# define alloca _alloca
-# else
-# ifdef JEMALLOC_HAS_ALLOCA_H
-# include <alloca.h>
-# else
-# include <stdlib.h>
-# endif
-# endif
-# define VARIABLE_ARRAY(type, name, count) \
- type *name = alloca(sizeof(type) * (count))
-#else
-# define VARIABLE_ARRAY(type, name, count) type name[(count)]
-#endif
-
-#endif /* JEMALLOC_INTERNAL_TYPES_H */
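
To make the flags layout above concrete, here is a small worked example built with the public MALLOCX_ALIGN, MALLOCX_ZERO and MALLOCX_ARENA constructors from jemalloc.h (linking against libjemalloc is assumed). The internal GET macros above recover the encoded values: MALLOCX_ALIGN_GET(flags) == 64, MALLOCX_ZERO_GET(flags) == true, and MALLOCX_ARENA_GET(flags) == 3.

    #include <assert.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /* Bits 0..5 carry lg(64), bit 6 the zero flag, bits 20.. arena+1. */
        int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_ARENA(3);
        void *p = mallocx(128, flags);
        assert(p != NULL && ((uintptr_t)p & 63) == 0);
        dallocx(p, 0);
        return 0;
    }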
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_preamble.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_preamble.h.in
deleted file mode 100644
index 5ce77d96..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/jemalloc_preamble.h.in
+++ /dev/null
@@ -1,263 +0,0 @@
-#ifndef JEMALLOC_PREAMBLE_H
-#define JEMALLOC_PREAMBLE_H
-
-#include "jemalloc_internal_defs.h"
-#include "jemalloc/internal/jemalloc_internal_decls.h"
-
-#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
-#include <sys/ktrace.h>
-# if defined(JEMALLOC_UTRACE)
-# define UTRACE_CALL(p, l) utrace(p, l)
-# else
-# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
-# define JEMALLOC_UTRACE
-# endif
-#endif
-
-#define JEMALLOC_NO_DEMANGLE
-#ifdef JEMALLOC_JET
-# undef JEMALLOC_IS_MALLOC
-# define JEMALLOC_N(n) jet_##n
-# include "jemalloc/internal/public_namespace.h"
-# define JEMALLOC_NO_RENAME
-# include "../jemalloc@install_suffix@.h"
-# undef JEMALLOC_NO_RENAME
-#else
-# define JEMALLOC_N(n) @private_namespace@##n
-# include "../jemalloc@install_suffix@.h"
-#endif
-
-#if defined(JEMALLOC_OSATOMIC)
-#include <libkern/OSAtomic.h>
-#endif
-
-#ifdef JEMALLOC_ZONE
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#endif
-
-#include "jemalloc/internal/jemalloc_internal_macros.h"
-
-/*
- * Note that the ordering matters here; the hook itself is name-mangled. We
- * want the inclusion of hooks to happen early, so that we hook as much as
- * possible.
- */
-#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
-# ifndef JEMALLOC_JET
-# include "jemalloc/internal/private_namespace.h"
-# else
-# include "jemalloc/internal/private_namespace_jet.h"
-# endif
-#endif
-#include "jemalloc/internal/test_hooks.h"
-
-#ifdef JEMALLOC_DEFINE_MADVISE_FREE
-# define JEMALLOC_MADV_FREE 8
-#endif
-
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-static const bool have_dss =
-#ifdef JEMALLOC_DSS
- true
-#else
- false
-#endif
- ;
-static const bool have_madvise_huge =
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
- true
-#else
- false
-#endif
- ;
-static const bool config_fill =
-#ifdef JEMALLOC_FILL
- true
-#else
- false
-#endif
- ;
-static const bool config_lazy_lock =
-#ifdef JEMALLOC_LAZY_LOCK
- true
-#else
- false
-#endif
- ;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
-static const bool config_prof =
-#ifdef JEMALLOC_PROF
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libgcc =
-#ifdef JEMALLOC_PROF_LIBGCC
- true
-#else
- false
-#endif
- ;
-static const bool config_prof_libunwind =
-#ifdef JEMALLOC_PROF_LIBUNWIND
- true
-#else
- false
-#endif
- ;
-static const bool maps_coalesce =
-#ifdef JEMALLOC_MAPS_COALESCE
- true
-#else
- false
-#endif
- ;
-static const bool config_stats =
-#ifdef JEMALLOC_STATS
- true
-#else
- false
-#endif
- ;
-static const bool config_tls =
-#ifdef JEMALLOC_TLS
- true
-#else
- false
-#endif
- ;
-static const bool config_utrace =
-#ifdef JEMALLOC_UTRACE
- true
-#else
- false
-#endif
- ;
-static const bool config_xmalloc =
-#ifdef JEMALLOC_XMALLOC
- true
-#else
- false
-#endif
- ;
-static const bool config_cache_oblivious =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- true
-#else
- false
-#endif
- ;
-/*
- * Undocumented, for jemalloc development use only at the moment. See the note
- * in jemalloc/internal/log.h.
- */
-static const bool config_log =
-#ifdef JEMALLOC_LOG
- true
-#else
- false
-#endif
- ;
-/*
- * Whether extra safety checks are enabled; things like checking the size of
- * sized deallocations, double-frees, etc.
- */
-static const bool config_opt_safety_checks =
-#ifdef JEMALLOC_OPT_SAFETY_CHECKS
- true
-#elif defined(JEMALLOC_DEBUG)
- /*
- * This lets us guard safety checks by a single flag instead of two; fast
- * checks can be guarded solely by config_opt_safety_checks and still run in
- * debug mode.
- */
- true
-#else
- false
-#endif
- ;
-
-/*
- * Extra debugging of sized deallocations too onerous to be included in the
- * general safety checks.
- */
-static const bool config_opt_size_checks =
-#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
- true
-#else
- false
-#endif
- ;
-
-static const bool config_uaf_detection =
-#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
- true
-#else
- false
-#endif
- ;
-
-/* Whether or not the C++ extensions are enabled. */
-static const bool config_enable_cxx =
-#ifdef JEMALLOC_ENABLE_CXX
- true
-#else
- false
-#endif
-;
-
-#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
-/* Currently percpu_arena depends on sched_getcpu. */
-#define JEMALLOC_PERCPU_ARENA
-#endif
-static const bool have_percpu_arena =
-#ifdef JEMALLOC_PERCPU_ARENA
- true
-#else
- false
-#endif
- ;
-/*
- * Undocumented, and not recommended; the application should take full
- * responsibility for tracking provenance.
- */
-static const bool force_ivsalloc =
-#ifdef JEMALLOC_FORCE_IVSALLOC
- true
-#else
- false
-#endif
- ;
-static const bool have_background_thread =
-#ifdef JEMALLOC_BACKGROUND_THREAD
- true
-#else
- false
-#endif
- ;
-static const bool config_high_res_timer =
-#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
- true
-#else
- false
-#endif
- ;
-
-static const bool have_memcntl =
-#ifdef JEMALLOC_HAVE_MEMCNTL
- true
-#else
- false
-#endif
- ;
-
-#endif /* JEMALLOC_PREAMBLE_H */
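
The reason these options are surfaced as static const bools rather than raw #ifdefs is that call sites can branch on them with plain if statements, as the inline fast paths earlier in this diff do, and the compiler deletes the dead branch when the option is compiled out. A minimal sketch of the pattern; stats_update() is a hypothetical hook, not a jemalloc symbol.

    extern void stats_update(size_t usize);   /* hypothetical */

    /* When JEMALLOC_STATS is not defined, config_stats is a compile-time
     * constant false and this branch disappears entirely. */
    static inline void
    maybe_count_alloc(size_t usize) {
        if (config_stats) {
            stats_update(usize);
        }
    }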
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/large_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/large_externs.h
deleted file mode 100644
index 8e09122d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/large_externs.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
-
-#include "jemalloc/internal/hook.h"
-
-void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
- size_t usize_max, bool zero);
-void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache,
- hook_ralloc_args_t *hook_args);
-
-void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
-void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
-void large_dalloc(tsdn_t *tsdn, edata_t *edata);
-size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
-void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
- bool reset_recent);
-void large_prof_tctx_reset(edata_t *edata);
-void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
-
-#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/lockedint.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/lockedint.h
deleted file mode 100644
index d020ebec..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/lockedint.h
+++ /dev/null
@@ -1,204 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
-#define JEMALLOC_INTERNAL_LOCKEDINT_H
-
-/*
- * On architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-
-typedef struct locked_u64_s locked_u64_t;
-#ifdef JEMALLOC_ATOMIC_U64
-struct locked_u64_s {
- atomic_u64_t val;
-};
-#else
-/* Must hold the associated mutex. */
-struct locked_u64_s {
- uint64_t val;
-};
-#endif
-
-typedef struct locked_zu_s locked_zu_t;
-struct locked_zu_s {
- atomic_zu_t val;
-};
-
-#ifndef JEMALLOC_ATOMIC_U64
-# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
-# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
- malloc_mutex_init(&(mu), name, rank, rank_mode)
-# define LOCKEDINT_MTX(mtx) (&(mtx))
-# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
-# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
-# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
-# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
- malloc_mutex_postfork_parent(tsdn, &(mu))
-# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
- malloc_mutex_postfork_child(tsdn, &(mu))
-#else
-# define LOCKEDINT_MTX_DECLARE(name)
-# define LOCKEDINT_MTX(mtx) NULL
-# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
-# define LOCKEDINT_MTX_LOCK(tsdn, mu)
-# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
-# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
-# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
-# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
-#endif
-
-#ifdef JEMALLOC_ATOMIC_U64
-# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
-#else
-# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
- malloc_mutex_assert_owner(tsdn, (mtx))
-#endif
-
-static inline uint64_t
-locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(&p->val, ATOMIC_RELAXED);
-#else
- return p->val;
-#endif
-}
-
-static inline void
-locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
- uint64_t x) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
-#else
- p->val += x;
-#endif
-}
-
-static inline void
-locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
- uint64_t x) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- p->val -= x;
- assert(p->val + x >= p->val);
-#endif
-}
-
-/* Increment and take modulus. Returns whether the modulo made any change. */
-static inline bool
-locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
- const uint64_t x, const uint64_t modulus) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
- uint64_t before, after;
- bool overflow;
-#ifdef JEMALLOC_ATOMIC_U64
- before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
- do {
- after = before + x;
- assert(after >= before);
- overflow = (after >= modulus);
- if (overflow) {
- after %= modulus;
- }
- } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
- ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
- before = p->val;
- after = before + x;
- overflow = (after >= modulus);
- if (overflow) {
- after %= modulus;
- }
- p->val = after;
-#endif
- return overflow;
-}
-
-/*
- * Non-atomically sets *dst += src. *dst needs external synchronization.
- * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
- * the types here are atomic).
- */
-static inline void
-locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
- atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
-#else
- dst->val += src;
-#endif
-}
-
-static inline uint64_t
-locked_read_u64_unsynchronized(locked_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(&p->val, ATOMIC_RELAXED);
-#else
- return p->val;
-#endif
-}
-
-static inline void
-locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
-#else
- p->val = x;
-#endif
-}
-
-static inline size_t
-locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
- /* locked_zu_t is always backed by an atomic; no #ifdef is needed here. */
- return atomic_load_zu(&p->val, ATOMIC_RELAXED);
-}
-
-static inline void
-locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
- size_t x) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
-#else
- size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
- atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
- size_t x) {
- LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
-#ifdef JEMALLOC_ATOMIC_U64
- size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
- atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
-#endif
-}
-
-/* Like the _u64 variant, needs an externally synchronized *dst. */
-static inline void
-locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
- size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
- atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
-}
-
-/*
- * Unlike the _u64 variant, this is safe to call unconditionally.
- */
-static inline size_t
-locked_read_atomic_zu(locked_zu_t *p) {
- return atomic_load_zu(&p->val, ATOMIC_RELAXED);
-}
-
-#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
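
The point of locked_u64_t is that the same call sites compile down to either a relaxed atomic or a plain integer protected by a caller-held mutex, depending on whether the platform has usable 64-bit atomics. A standalone sketch of the same idea using C11 atomics and pthreads rather than jemalloc's own atomic and mutex wrappers (the counter_* names and the HAVE_ATOMIC_U64 switch are hypothetical):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    /* Mirrors JEMALLOC_ATOMIC_U64: define when 64-bit atomics are usable. */
    #define HAVE_ATOMIC_U64 1

    #if HAVE_ATOMIC_U64
    typedef struct { _Atomic uint64_t val; } counter_u64_t;
    /* No mutex needed; the lock macros compile away, like LOCKEDINT_MTX_*. */
    #define COUNTER_MTX_DECLARE(name)
    #define COUNTER_LOCK(mtx)
    #define COUNTER_UNLOCK(mtx)
    static inline void
    counter_inc(counter_u64_t *c, uint64_t x) {
        atomic_fetch_add_explicit(&c->val, x, memory_order_relaxed);
    }
    #else
    typedef struct { uint64_t val; } counter_u64_t;
    #define COUNTER_MTX_DECLARE(name) pthread_mutex_t name;
    #define COUNTER_LOCK(mtx) pthread_mutex_lock(&(mtx))
    #define COUNTER_UNLOCK(mtx) pthread_mutex_unlock(&(mtx))
    static inline void
    counter_inc(counter_u64_t *c, uint64_t x) {
        c->val += x; /* Caller holds the associated mutex. */
    }
    #endif

Call sites then look identical in both configurations: COUNTER_LOCK(mtx); counter_inc(&c, 1); COUNTER_UNLOCK(mtx); with the lock/unlock pair expanding to nothing when the atomic path is in use.
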
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/log.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/log.h
deleted file mode 100644
index 64208586..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/log.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_LOG_H
-#define JEMALLOC_INTERNAL_LOG_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/malloc_io.h"
-#include "jemalloc/internal/mutex.h"
-
-#ifdef JEMALLOC_LOG
-# define JEMALLOC_LOG_VAR_BUFSIZE 1000
-#else
-# define JEMALLOC_LOG_VAR_BUFSIZE 1
-#endif
-
-#define JEMALLOC_LOG_BUFSIZE 4096
-
-/*
- * The log malloc_conf option is a '|'-delimited list of log_var name segments
- * which should be logged. The names are themselves hierarchical, with '.' as
- * the delimiter (a "segment" is just a prefix in the log namespace). So, if
- * you have:
- *
- * log("arena", "log msg for arena"); // 1
- * log("arena.a", "log msg for arena.a"); // 2
- * log("arena.b", "log msg for arena.b"); // 3
- * log("arena.a.a", "log msg for arena.a.a"); // 4
- * log("extent.a", "log msg for extent.a"); // 5
- * log("extent.b", "log msg for extent.b"); // 6
- *
- * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
- * 6 will print at runtime. You can enable logging from all log vars by
- * writing "log=.".
- *
- * None of this should be regarded as a stable API for now. It's intended
- * as a debugging interface, to let us keep around some of our printf-debugging
- * statements.
- */
-
-extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
-extern atomic_b_t log_init_done;
-
-typedef struct log_var_s log_var_t;
-struct log_var_s {
- /*
- * Lowest bit is "inited", second lowest is "enabled". Putting them in
- * a single word lets us avoid any fences on weak architectures.
- */
- atomic_u_t state;
- const char *name;
-};
-
-#define LOG_NOT_INITIALIZED 0U
-#define LOG_INITIALIZED_NOT_ENABLED 1U
-#define LOG_ENABLED 2U
-
-#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
-
-/*
- * Returns the value we should assume for state (which is not necessarily
- * accurate; if logging is done before logging has finished initializing, then
- * we default to doing the safe thing by logging everything).
- */
-unsigned log_var_update_state(log_var_t *log_var);
-
-/* We factor out the metadata management to allow us to test more easily. */
-#define log_do_begin(log_var) \
-if (config_log) { \
- unsigned log_state = atomic_load_u(&(log_var).state, \
- ATOMIC_RELAXED); \
- if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
- log_state = log_var_update_state(&(log_var)); \
- assert(log_state != LOG_NOT_INITIALIZED); \
- } \
- if (log_state == LOG_ENABLED) { \
- {
- /* User code executes here. */
-#define log_do_end(log_var) \
- } \
- } \
-}
-
-/*
- * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
- * preprocessing. To work around this, we take all potential extra arguments in
- * a varargs function. Since a varargs macro needs at least one argument in
- * the "...", we accept the format string there, and require that the first
- * argument in this "..." is a const char *.
- */
-static inline void
-log_impl_varargs(const char *name, ...) {
- char buf[JEMALLOC_LOG_BUFSIZE];
- va_list ap;
-
- va_start(ap, name);
- const char *format = va_arg(ap, const char *);
- size_t dst_offset = 0;
- dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
- dst_offset += malloc_vsnprintf(buf + dst_offset,
- JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
- dst_offset += malloc_snprintf(buf + dst_offset,
- JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
- va_end(ap);
-
- malloc_write(buf);
-}
-
-/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
-#define LOG(log_var_str, ...) \
-do { \
- static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
- log_do_begin(log_var) \
- log_impl_varargs((log_var).name, __VA_ARGS__); \
- log_do_end(log_var) \
-} while (0)
-
-#endif /* JEMALLOC_INTERNAL_LOG_H */
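
Putting the pieces of this header together: a call site names its log var with LOG(), and the filter comes in at run time through the log malloc_conf option described in the comment above. A hedged sketch of what a call site could look like inside a build configured with --enable-log (the "extent.split" name and the surrounding function are invented for illustration):

    /* In a jemalloc source file that includes jemalloc/internal/log.h. */
    static void
    extent_split_note(size_t orig_size, size_t lhs_size) {
        /*
         * Expands to a function-local static log_var_t plus the
         * log_do_begin()/log_do_end() guards; when the var is disabled,
         * this is just a relaxed atomic load and a branch.
         */
        LOG("extent.split", "split %zu-byte extent, keeping %zu bytes",
            orig_size, lhs_size);
    }

Running with MALLOC_CONF="log=extent" (or "log=." to enable every log var) would then print the message through malloc_write(); any other filter leaves the call site disabled.
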
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/malloc_io.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/malloc_io.h
deleted file mode 100644
index a375bdae..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/malloc_io.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
-#define JEMALLOC_INTERNAL_MALLOC_IO_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-
-#ifdef _WIN32
-# ifdef _WIN64
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX "ll"
-# else
-# define FMT64_PREFIX "ll"
-# define FMTPTR_PREFIX ""
-# endif
-# define FMTd32 "d"
-# define FMTu32 "u"
-# define FMTx32 "x"
-# define FMTd64 FMT64_PREFIX "d"
-# define FMTu64 FMT64_PREFIX "u"
-# define FMTx64 FMT64_PREFIX "x"
-# define FMTdPTR FMTPTR_PREFIX "d"
-# define FMTuPTR FMTPTR_PREFIX "u"
-# define FMTxPTR FMTPTR_PREFIX "x"
-#else
-# include <inttypes.h>
-# define FMTd32 PRId32
-# define FMTu32 PRIu32
-# define FMTx32 PRIx32
-# define FMTd64 PRId64
-# define FMTu64 PRIu64
-# define FMTx64 PRIx64
-# define FMTdPTR PRIdPTR
-# define FMTuPTR PRIuPTR
-# define FMTxPTR PRIxPTR
-#endif
-
-/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
-
-/*
- * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
- * large enough for all possible uses within jemalloc.
- */
-#define MALLOC_PRINTF_BUFSIZE 4096
-
-write_cb_t wrtmessage;
-int buferror(int err, char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
- int base);
-void malloc_write(const char *s);
-
-/*
- * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
- * point math.
- */
-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
- va_list ap);
-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
- JEMALLOC_FORMAT_PRINTF(3, 4);
-/*
- * The caller can set write_cb to null to choose to print with the
- * je_malloc_message hook.
- */
-void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
- va_list ap);
-void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
- ...) JEMALLOC_FORMAT_PRINTF(3, 4);
-void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
-
-static inline ssize_t
-malloc_write_fd(int fd, const void *buf, size_t count) {
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
- /*
- * Use syscall(2) rather than write(2) when possible in order to avoid
- * the possibility of memory allocation within libc. This is necessary
- * on FreeBSD; most operating systems do not have this problem though.
- *
- * syscall() returns long or int, depending on platform, so capture the
- * result in the widest plausible type to avoid compiler warnings.
- */
- long result = syscall(SYS_write, fd, buf, count);
-#else
- ssize_t result = (ssize_t)write(fd, buf,
-#ifdef _WIN32
- (unsigned int)
-#endif
- count);
-#endif
- return (ssize_t)result;
-}
-
-static inline ssize_t
-malloc_read_fd(int fd, void *buf, size_t count) {
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
- long result = syscall(SYS_read, fd, buf, count);
-#else
- ssize_t result = read(fd, buf,
-#ifdef _WIN32
- (unsigned int)
-#endif
- count);
-#endif
- return (ssize_t)result;
-}
-
-#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
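
The FMT* macros above exist to paper over platform differences in format strings, presumably because older MSVC toolchains did not ship <inttypes.h>; elsewhere they simply forward to the standard PRI* macros. The underlying string-pasting trick is ordinary C; a standalone example using the regular <inttypes.h> macros rather than jemalloc's wrappers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t nmalloc = 123456789012345ULL;
        uintptr_t addr = (uintptr_t)&nmalloc;
        /*
         * Adjacent string literals are concatenated, so the format macro
         * splices in the right length modifier for this platform.
         */
        printf("nmalloc: %" PRIu64 ", addr: 0x%" PRIxPTR "\n", nmalloc, addr);
        return 0;
    }
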
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mpsc_queue.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mpsc_queue.h
deleted file mode 100644
index 316ea9b1..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mpsc_queue.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
-#define JEMALLOC_INTERNAL_MPSC_QUEUE_H
-
-#include "jemalloc/internal/atomic.h"
-
-/*
- * A concurrent implementation of a multi-producer, single-consumer queue. It
- * supports three concurrent operations:
- * - Push
- * - Push batch
- * - Pop batch
- *
- * These operations are all lock-free.
- *
- * The implementation is the simple two-stack queue built on a Treiber stack.
- * It's not terribly efficient, but this isn't expected to end up anywhere near
- * hot code. In fact, we don't really even need queue semantics in any
- * anticipated use cases; we could get away with just the stack. But this way
- * lets us frame the API in terms of the existing list types, which is a nice
- * convenience. We can save on cache misses by introducing our own (parallel)
- * single-linked list type here, and dropping FIFO semantics, if we need this to
- * get faster. Since we're currently providing queue semantics though, we use
- * the prev field in the link rather than the next field for Treiber-stack
- * linkage, so that we can preserve order for batch-pushed lists (recall that
- * the two-stack trick reverses order in the lock-free first stack).
- */
-
-#define mpsc_queue(a_type) \
-struct { \
- atomic_p_t tail; \
-}
-
-#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
- a_list_type) \
-/* Initialize a queue. */ \
-a_attr void \
-a_prefix##new(a_queue_type *queue); \
-/* Insert all items in src into the queue, clearing src. */ \
-a_attr void \
-a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
-/* Insert node into the queue. */ \
-a_attr void \
-a_prefix##push(a_queue_type *queue, a_type *node); \
-/* \
- * Pop all items in the queue into the list at dst. dst should already \
- * be initialized (and may contain existing items, which then remain \
- * in dst). \
- */ \
-a_attr void \
-a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
-
-#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
- a_list_type, a_link) \
-a_attr void \
-a_prefix##new(a_queue_type *queue) { \
- atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
-} \
-a_attr void \
-a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
- /* \
- * Reuse the ql list next field as the Treiber stack next \
- * field. \
- */ \
- a_type *first = ql_first(src); \
- a_type *last = ql_last(src, a_link); \
- void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
- do { \
- /* \
- * Note that this breaks the queue ring structure; \
- * it's not a ring any more! \
- */ \
- first->a_link.qre_prev = cur_tail; \
- /* \
- * Note: the upcoming CAS doesn't need acquire ordering; every \
- * push only needs to synchronize with the next pop, \
- * which we get from the release sequence rules. \
- */ \
- } while (!atomic_compare_exchange_weak_p(&queue->tail, \
- &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
- ql_new(src); \
-} \
-a_attr void \
-a_prefix##push(a_queue_type *queue, a_type *node) { \
- ql_elm_new(node, a_link); \
- a_list_type list; \
- ql_new(&list); \
- ql_head_insert(&list, node, a_link); \
- a_prefix##push_batch(queue, &list); \
-} \
-a_attr void \
-a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
- a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
- if (tail == NULL) { \
- /* \
- * In the common special case where there are no \
- * pending elements, bail early without a costly RMW. \
- */ \
- return; \
- } \
- tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
- /* \
- * It's a single-consumer queue, so if tail started non-NULL, \
- * it'd better stay non-NULL. \
- */ \
- assert(tail != NULL); \
- /* \
- * We iterate through the stack and fix up the link \
- * structure (stack insertion broke the list requirement that \
- * the list be circularly linked). It's just as efficient at \
- * this point to make the queue a "real" queue, so do that as \
- * well. \
- * If this ever gets to be a hot spot, we can omit this fixup \
- * and make the queue a bag (i.e. not necessarily ordered), but \
- * that would mean jettisoning the existing list API as the \
- * batch pushing/popping interface. \
- */ \
- a_list_type reversed; \
- ql_new(&reversed); \
- while (tail != NULL) { \
- /* \
- * Pop an item off the stack, prepend it onto the list \
- * (reversing the order). Recall that we use the \
- * list prev field as the Treiber stack next field to \
- * preserve order of batch-pushed items when reversed. \
- */ \
- a_type *next = tail->a_link.qre_prev; \
- ql_elm_new(tail, a_link); \
- ql_head_insert(&reversed, tail, a_link); \
- tail = next; \
- } \
- ql_concat(dst, &reversed, a_link); \
-}
-
-#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
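
Since the queue is generated by macros, using it means instantiating the prototype and generator for a concrete element type. A hedged sketch of such an instantiation (the job type, prefix, and drain function are invented; ql_head, ql_elm, and the other ql_* macros come from jemalloc's internal ql.h, which is not shown in this diff):

    typedef struct job_s job_t;
    struct job_s {
        int id;
        ql_elm(job_t) link;  /* List linkage reused as Treiber-stack linkage. */
    };
    typedef ql_head(job_t) job_list_t;
    typedef mpsc_queue(job_t) job_queue_t;

    mpsc_queue_proto(static, job_queue_, job_queue_t, job_t, job_list_t);
    mpsc_queue_gen(static, job_queue_, job_queue_t, job_t, job_list_t, link);

    /* Any thread may call job_queue_push(&queue, job) once job_queue_new(&queue)
     * has run. The single consumer drains everything pushed so far, in order: */
    static void
    job_queue_drain(job_queue_t *queue) {
        job_list_t batch;
        ql_new(&batch);
        job_queue_pop_batch(queue, &batch);
        job_t *job;
        ql_foreach(job, &batch, link) {
            /* ...process job->id... */
        }
    }
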
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex.h
deleted file mode 100644
index 63a0b1b3..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex.h
+++ /dev/null
@@ -1,319 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_H
-#define JEMALLOC_INTERNAL_MUTEX_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/witness.h"
-
-extern int64_t opt_mutex_max_spin;
-
-typedef enum {
- /* Can only acquire one mutex of a given witness rank at a time. */
- malloc_mutex_rank_exclusive,
- /*
- * Can acquire multiple mutexes of the same witness rank, but in
- * address-ascending order only.
- */
- malloc_mutex_address_ordered
-} malloc_mutex_lock_order_t;
-
-typedef struct malloc_mutex_s malloc_mutex_t;
-struct malloc_mutex_s {
- union {
- struct {
- /*
- * prof_data is defined first to reduce cacheline
- * bouncing: the data is not touched by the mutex holder
- * during unlocking, while it might be modified by
- * contenders. Having it before the mutex itself could
- * avoid prefetching a modified cacheline (for the
- * unlocking thread).
- */
- mutex_prof_data_t prof_data;
-#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
- SRWLOCK lock;
-# else
- CRITICAL_SECTION lock;
-# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- pthread_mutex_t lock;
- malloc_mutex_t *postponed_next;
-#else
- pthread_mutex_t lock;
-#endif
- /*
- * Hint flag to avoid exclusive cache line contention
- * during spin waiting
- */
- atomic_b_t locked;
- };
- /*
- * We only touch witness when configured w/ debug. However we
- * keep the field in a union when !debug so that we don't have
- * to pollute the code base with #ifdefs, while avoiding the
- * memory cost.
- */
-#if !defined(JEMALLOC_DEBUG)
- witness_t witness;
- malloc_mutex_lock_order_t lock_order;
-#endif
- };
-
-#if defined(JEMALLOC_DEBUG)
- witness_t witness;
- malloc_mutex_lock_order_t lock_order;
-#endif
-};
-
-#ifdef _WIN32
-# if _WIN32_WINNT >= 0x0600
-# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
-# else
-# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
-# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
-#else
-# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
-# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
-# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
-#endif
-
-#define LOCK_PROF_DATA_INITIALIZER \
- {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
- ATOMIC_INIT(0), 0, NULL, 0}
-
-#ifdef _WIN32
-# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# if defined(JEMALLOC_DEBUG)
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# if (defined(JEMALLOC_DEBUG))
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
-
-#else
-# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# if defined(JEMALLOC_DEBUG)
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
-# else
-# define MALLOC_MUTEX_INITIALIZER \
- {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
- WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-# endif
-#endif
-
-#ifdef JEMALLOC_LAZY_LOCK
-extern bool isthreaded;
-#else
-# undef isthreaded /* Undo private_namespace.h definition. */
-# define isthreaded true
-#endif
-
-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool malloc_mutex_boot(void);
-void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
-
-void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
-
-static inline void
-malloc_mutex_lock_final(malloc_mutex_t *mutex) {
- MALLOC_MUTEX_LOCK(mutex);
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
-}
-
-static inline bool
-malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
- return MALLOC_MUTEX_TRYLOCK(mutex);
-}
-
-static inline void
-mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- if (config_stats) {
- mutex_prof_data_t *data = &mutex->prof_data;
- data->n_lock_ops++;
- if (data->prev_owner != tsdn) {
- data->prev_owner = tsdn;
- data->n_owner_switches++;
- }
- }
-}
-
-/* Trylock: return false if the lock is successfully acquired. */
-static inline bool
-malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
- if (isthreaded) {
- if (malloc_mutex_trylock_final(mutex)) {
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
- return true;
- }
- mutex_owner_stats_update(tsdn, mutex);
- }
- witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-
- return false;
-}
-
-/* Aggregate lock prof data. */
-static inline void
-malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
- nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
- if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
- nstime_copy(&sum->max_wait_time, &data->max_wait_time);
- }
-
- sum->n_wait_times += data->n_wait_times;
- sum->n_spin_acquired += data->n_spin_acquired;
-
- if (sum->max_n_thds < data->max_n_thds) {
- sum->max_n_thds = data->max_n_thds;
- }
- uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
- ATOMIC_RELAXED);
- uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
- &data->n_waiting_thds, ATOMIC_RELAXED);
- atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
- ATOMIC_RELAXED);
- sum->n_owner_switches += data->n_owner_switches;
- sum->n_lock_ops += data->n_lock_ops;
-}
-
-static inline void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
- if (isthreaded) {
- if (malloc_mutex_trylock_final(mutex)) {
- malloc_mutex_lock_slow(mutex);
- atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
- }
- mutex_owner_stats_update(tsdn, mutex);
- }
- witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
-
-static inline void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
- witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
- if (isthreaded) {
- MALLOC_MUTEX_UNLOCK(mutex);
- }
-}
-
-static inline void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
-
-static inline void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
- witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
-}
-
-static inline void
-malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
- /*
- * Not *really* allowed (we shouldn't be doing non-atomic loads of
- * atomic data), but the mutex protection makes this safe, and writing
- * a member-for-member copy is tedious for this situation.
- */
- *dst = *source;
- /* n_waiting_thds is not reported (modified w/o locking). */
- atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
-}
-
-/* Copy the prof data from mutex for processing. */
-static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
- malloc_mutex_prof_copy(data, &mutex->prof_data);
-}
-
-static inline void
-malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
- nstime_add(&data->tot_wait_time, &source->tot_wait_time);
- if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
- nstime_copy(&data->max_wait_time, &source->max_wait_time);
- }
- data->n_wait_times += source->n_wait_times;
- data->n_spin_acquired += source->n_spin_acquired;
- if (data->max_n_thds < source->max_n_thds) {
- data->max_n_thds = source->max_n_thds;
- }
- /* n_waiting_thds is not reported. */
- atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
- data->n_owner_switches += source->n_owner_switches;
- data->n_lock_ops += source->n_lock_ops;
-}
-
-/* Compare the prof data and update to the maximum. */
-static inline void
-malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
- if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
- nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
- }
- if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
- nstime_copy(&data->max_wait_time, &source->max_wait_time);
- }
- if (source->n_wait_times > data->n_wait_times) {
- data->n_wait_times = source->n_wait_times;
- }
- if (source->n_spin_acquired > data->n_spin_acquired) {
- data->n_spin_acquired = source->n_spin_acquired;
- }
- if (source->max_n_thds > data->max_n_thds) {
- data->max_n_thds = source->max_n_thds;
- }
- if (source->n_owner_switches > data->n_owner_switches) {
- data->n_owner_switches = source->n_owner_switches;
- }
- if (source->n_lock_ops > data->n_lock_ops) {
- data->n_lock_ops = source->n_lock_ops;
- }
- /* n_waiting_thds is not reported. */
-}
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_H */
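
A hedged sketch of how this API is used from other internal modules: initialize once with a name, witness rank, and lock order, then lock/unlock with the caller's tsdn_t so the witness machinery can check lock ordering in debug builds. The widget names are invented; WITNESS_RANK_OMIT is the rank that MALLOC_MUTEX_INITIALIZER above also uses.

    static malloc_mutex_t widget_mtx;
    static uint64_t widget_count;   /* Protected by widget_mtx. */

    static bool
    widget_boot(void) {
        /* Returns true on error, per jemalloc's usual convention. */
        return malloc_mutex_init(&widget_mtx, "widget", WITNESS_RANK_OMIT,
            malloc_mutex_rank_exclusive);
    }

    static void
    widget_bump(tsdn_t *tsdn) {
        malloc_mutex_lock(tsdn, &widget_mtx);
        widget_count++;
        malloc_mutex_unlock(tsdn, &widget_mtx);
    }
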
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex_prof.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex_prof.h
deleted file mode 100644
index 4a526a5a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/mutex_prof.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
-#define JEMALLOC_INTERNAL_MUTEX_PROF_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/tsd_types.h"
-
-#define MUTEX_PROF_GLOBAL_MUTEXES \
- OP(background_thread) \
- OP(max_per_bg_thd) \
- OP(ctl) \
- OP(prof) \
- OP(prof_thds_data) \
- OP(prof_dump) \
- OP(prof_recent_alloc) \
- OP(prof_recent_dump) \
- OP(prof_stats)
-
-typedef enum {
-#define OP(mtx) global_prof_mutex_##mtx,
- MUTEX_PROF_GLOBAL_MUTEXES
-#undef OP
- mutex_prof_num_global_mutexes
-} mutex_prof_global_ind_t;
-
-#define MUTEX_PROF_ARENA_MUTEXES \
- OP(large) \
- OP(extent_avail) \
- OP(extents_dirty) \
- OP(extents_muzzy) \
- OP(extents_retained) \
- OP(decay_dirty) \
- OP(decay_muzzy) \
- OP(base) \
- OP(tcache_list) \
- OP(hpa_shard) \
- OP(hpa_shard_grow) \
- OP(hpa_sec)
-
-typedef enum {
-#define OP(mtx) arena_prof_mutex_##mtx,
- MUTEX_PROF_ARENA_MUTEXES
-#undef OP
- mutex_prof_num_arena_mutexes
-} mutex_prof_arena_ind_t;
-
-/*
- * The fourth parameter is a boolean value that is true for derived rate counters
- * and false for real ones.
- */
-#define MUTEX_PROF_UINT64_COUNTERS \
- OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \
- OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \
- OP(num_wait, uint64_t, "n_waiting", false, num_wait) \
- OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \
- OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \
- OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \
- OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \
- OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \
- OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \
- OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \
- OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
-
-#define MUTEX_PROF_UINT32_COUNTERS \
- OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
-
-#define MUTEX_PROF_COUNTERS \
- MUTEX_PROF_UINT64_COUNTERS \
- MUTEX_PROF_UINT32_COUNTERS
-
-#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
-
-#define COUNTER_ENUM(counter_list, t) \
- typedef enum { \
- counter_list \
- mutex_prof_num_##t##_counters \
- } mutex_prof_##t##_counter_ind_t;
-
-COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
-COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
-
-#undef COUNTER_ENUM
-#undef OP
-
-typedef struct {
- /*
- * Counters touched on the slow path, i.e. when there is lock
- * contention. We update them once we have the lock.
- */
- /* Total time (in nanoseconds) spent waiting on this mutex. */
- nstime_t tot_wait_time;
- /* Max time (in nanoseconds) spent on a single lock operation. */
- nstime_t max_wait_time;
- /* # of times we had to wait for this mutex (after spinning). */
- uint64_t n_wait_times;
- /* # of times acquired the mutex through local spinning. */
- uint64_t n_spin_acquired;
- /* Max # of threads waiting for the mutex at the same time. */
- uint32_t max_n_thds;
- /* Current # of threads waiting on the lock. Atomic synced. */
- atomic_u32_t n_waiting_thds;
-
- /*
- * Data touched on the fast path. These are modified right after we
- * grab the lock, so it's placed closest to the end (i.e. right before
- * the lock) so that we have a higher chance of them being on the same
- * cacheline.
- */
- /* # of times the mutex holder is different than the previous one. */
- uint64_t n_owner_switches;
- /* Previous mutex holder, to facilitate n_owner_switches. */
- tsdn_t *prev_owner;
- /* # of lock() operations in total. */
- uint64_t n_lock_ops;
-} mutex_prof_data_t;
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
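
The OP()/#undef OP dance above is the classic X-macro pattern: a single list of counters is expanded once into an enum and can be expanded again into parallel tables (names, types, human-readable labels) without the lists ever drifting apart. A self-contained example of the same technique with made-up counter names:

    #include <stdio.h>

    /* One list of counters, expanded several ways by redefining OP(). */
    #define DEMO_COUNTERS \
        OP(num_ops)       \
        OP(num_wait)      \
        OP(max_wait_ns)

    typedef enum {
    #define OP(c) demo_counter_##c,
        DEMO_COUNTERS
    #undef OP
        demo_num_counters
    } demo_counter_ind_t;

    static const char *demo_counter_names[] = {
    #define OP(c) #c,
        DEMO_COUNTERS
    #undef OP
    };

    int main(void) {
        for (int i = 0; i < demo_num_counters; i++) {
            printf("%d: %s\n", i, demo_counter_names[i]);
        }
        return 0;
    }
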
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/nstime.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/nstime.h
deleted file mode 100644
index 486e5cca..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/nstime.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_NSTIME_H
-#define JEMALLOC_INTERNAL_NSTIME_H
-
-/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
-
-#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
-#ifdef JEMALLOC_DEBUG
-# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
-#else
-# define NSTIME_ZERO_INITIALIZER {0}
-#endif
-
-typedef struct {
- uint64_t ns;
-#ifdef JEMALLOC_DEBUG
- uint32_t magic; /* Tracks if initialized. */
-#endif
-} nstime_t;
-
-static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
-
-void nstime_init(nstime_t *time, uint64_t ns);
-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t nstime_ns(const nstime_t *time);
-uint64_t nstime_sec(const nstime_t *time);
-uint64_t nstime_msec(const nstime_t *time);
-uint64_t nstime_nsec(const nstime_t *time);
-void nstime_copy(nstime_t *time, const nstime_t *source);
-int nstime_compare(const nstime_t *a, const nstime_t *b);
-void nstime_add(nstime_t *time, const nstime_t *addend);
-void nstime_iadd(nstime_t *time, uint64_t addend);
-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-uint64_t nstime_ns_since(const nstime_t *past);
-
-typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
-
-typedef void (nstime_update_t)(nstime_t *);
-extern nstime_update_t *JET_MUTABLE nstime_update;
-
-typedef void (nstime_prof_update_t)(nstime_t *);
-extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
-
-void nstime_init_update(nstime_t *time);
-void nstime_prof_init_update(nstime_t *time);
-
-enum prof_time_res_e {
- prof_time_res_default = 0,
- prof_time_res_high = 1
-};
-typedef enum prof_time_res_e prof_time_res_t;
-
-extern prof_time_res_t opt_prof_time_res;
-extern const char *prof_time_res_mode_names[];
-
-JEMALLOC_ALWAYS_INLINE void
-nstime_init_zero(nstime_t *time) {
- nstime_copy(time, &nstime_zero);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-nstime_equals_zero(nstime_t *time) {
- int diff = nstime_compare(time, &nstime_zero);
- assert(diff >= 0);
- return diff == 0;
-}
-
-#endif /* JEMALLOC_INTERNAL_NSTIME_H */
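
A small usage sketch for the API above, as it might appear inside jemalloc (the wrapper function is hypothetical): initialize an nstime_t to the current time through the pluggable nstime_update path, then ask how many nanoseconds have passed.

    static uint64_t
    time_operation_ns(void) {
        nstime_t start;
        nstime_init_update(&start);      /* Set to the current time. */
        /* ...the operation being timed... */
        return nstime_ns_since(&start);  /* Nanoseconds elapsed since start. */
    }
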
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pa.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pa.h
deleted file mode 100644
index 4748a05b..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pa.h
+++ /dev/null
@@ -1,243 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PA_H
-#define JEMALLOC_INTERNAL_PA_H
-
-#include "jemalloc/internal/base.h"
-#include "jemalloc/internal/decay.h"
-#include "jemalloc/internal/ecache.h"
-#include "jemalloc/internal/edata_cache.h"
-#include "jemalloc/internal/emap.h"
-#include "jemalloc/internal/hpa.h"
-#include "jemalloc/internal/lockedint.h"
-#include "jemalloc/internal/pac.h"
-#include "jemalloc/internal/pai.h"
-#include "jemalloc/internal/sec.h"
-
-/*
- * The page allocator; responsible for acquiring pages of memory for
- * allocations. It picks the implementation of the page allocator interface
- * (i.e. a pai_t) to handle a given page-level allocation request. For now, the
- * only such implementation is the PAC code ("page allocator classic"), but
- * others will be coming soon.
- */
-
-typedef struct pa_central_s pa_central_t;
-struct pa_central_s {
- hpa_central_t hpa;
-};
-
-/*
- * The stats for a particular pa_shard. Because of the way the ctl module
- * handles stats epoch data collection (it has its own arena_stats, and merges
- * the stats from each arena into it), this needs to live in the arena_stats_t;
- * hence we define it here and let the pa_shard have a pointer (rather than the
- * more natural approach of just embedding it in the pa_shard itself).
- *
- * We follow the arena_stats_t approach of marking the derived fields. These
- * are the ones that are not maintained on their own; instead, their values are
- * derived during those stats merges.
- */
-typedef struct pa_shard_stats_s pa_shard_stats_t;
-struct pa_shard_stats_s {
- /* Number of edata_t structs allocated by base, but not being used. */
- size_t edata_avail; /* Derived. */
- /*
- * Stats specific to the PAC. For now, these are the only stats that
- * exist, but there will eventually be other page allocators. Things
- * like edata_avail make sense in a cross-PA sense, but things like
- * npurges don't.
- */
- pac_stats_t pac_stats;
-};
-
-/*
- * The local allocator handle. Keeps the state necessary to satisfy page-sized
- * allocations.
- *
- * The contents are mostly internal to the PA module. The key exception is that
- * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
- * decay_ts, for a couple of queries, passing them back to a PA function, or
- * acquiring decay.mtx and looking at decay.purging. The reasoning is that,
- * while PA decides what and how to purge, the arena code decides when and where
- * (e.g. on what thread). It's allowed to use the presence of another purger to
- * decide.
- * (The background thread code also touches some other decay internals, but
- * that's not fundamental; it's just an artifact of a partial refactoring, and
- * its accesses could be straightforwardly moved inside the decay module).
- */
-typedef struct pa_shard_s pa_shard_t;
-struct pa_shard_s {
- /* The central PA this shard is associated with. */
- pa_central_t *central;
-
- /*
- * Number of pages in active extents.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t nactive;
-
- /*
- * Whether or not we should prefer the hugepage allocator. Atomic since
- * it may be concurrently modified by a thread setting extent hooks.
- * Note that we still may do HPA operations in this arena; if use_hpa is
- * changed from true to false, we'll free back to the hugepage allocator
- * for those allocations.
- */
- atomic_b_t use_hpa;
-
- /*
- * If we never used the HPA to begin with, it wasn't initialized, and so
- * we shouldn't try to e.g. acquire its mutexes during fork. This
- * tracks that knowledge.
- */
- bool ever_used_hpa;
-
- /* Allocates from a PAC. */
- pac_t pac;
-
- /*
- * We place a small extent cache in front of the HPA, since we intend
- * these configurations to use many fewer arenas, and therefore have a
- * higher risk of hot locks.
- */
- sec_t hpa_sec;
- hpa_shard_t hpa_shard;
-
- /* The source of edata_t objects. */
- edata_cache_t edata_cache;
-
- unsigned ind;
-
- malloc_mutex_t *stats_mtx;
- pa_shard_stats_t *stats;
-
- /* The emap this shard is tied to. */
- emap_t *emap;
-
- /* The base from which we get the ehooks and allocate metadata. */
- base_t *base;
-};
-
-static inline bool
-pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
- return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
- pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
-}
-
-static inline ehooks_t *
-pa_shard_ehooks_get(pa_shard_t *shard) {
- return base_ehooks_get(shard->base);
-}
-
-/* Returns true on error. */
-bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
- hpa_hooks_t *hpa_hooks);
-
-/* Returns true on error. */
-bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
- emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
- malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
- ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
-
-/*
- * This isn't exposed to users; we allow late enablement of the HPA shard so
- * that we can boot without worrying about the HPA, then turn it on in a0.
- */
-bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
- const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
-
-/*
- * We stop using the HPA when custom extent hooks are installed, but still
- * redirect deallocations to it.
- */
-void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
-
-/*
- * This does the PA-specific parts of arena reset (i.e. freeing all active
- * allocations).
- */
-void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
-
-/*
- * Destroy all the remaining retained extents. Should only be called after
- * decaying all active, dirty, and muzzy extents to the retained state, as the
- * last step in destroying the shard.
- */
-void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
-
-/* Gets an edata for the given allocation. */
-edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
- size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
- bool *deferred_work_generated);
-/* Returns true on error, in which case nothing changed. */
-bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
- size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
-/*
- * The same, but for shrinking. Sets *deferred_work_generated to true if the
- * shrink produced deferred work (e.g. new dirty pages), and false otherwise.
- */
-bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
- size_t new_size, szind_t szind, bool *deferred_work_generated);
-/*
- * Frees the given edata back to the PA. Sets *deferred_work_generated if the
- * deallocation produced deferred work, e.g. new dirty pages (for now it is
- * always set, but this need not remain the case).
- * (We could make deferred_work_generated the return value of course, but an
- * out-parameter is more consistent with the shrink pathway and our error
- * codes here.)
- */
-void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
- bool *deferred_work_generated);
-bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
- ssize_t decay_ms, pac_purge_eagerness_t eagerness);
-ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
-
-/*
- * Do deferred work on this PA shard.
- *
- * Morally, this should do both PAC decay and the HPA deferred work. For now,
- * though, the arena, background thread, and PAC modules are tightly interwoven
- * in a way that's tricky to extricate, so we only do the HPA-specific parts.
- */
-void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
- bool deferral_allowed);
-void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
-uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
-
-/******************************************************************************/
-/*
- * Various bits of "boring" functionality that are still part of this module,
- * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
- * readable as possible.
- */
-
-/*
- * These fork phases are synchronized with the arena fork phase numbering to
- * make it easy to keep straight. That's why there's no prefork1.
- */
-void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
-void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
-
-void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
- size_t *ndirty, size_t *nmuzzy);
-
-void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
- pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
- hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
- size_t *resident);
-
-/*
- * Reads the PA-owned mutex stats into the output stats array, at the
- * appropriate positions. Morally, these stats should really live in
- * pa_shard_stats_t, but the indices are sort of baked into the various mutex
- * prof macros. This would be a good thing to do at some point.
- */
-void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
- mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);
-
-#endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pac.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pac.h
deleted file mode 100644
index 01c4e6af..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pac.h
+++ /dev/null
@@ -1,179 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PAC_H
-#define JEMALLOC_INTERNAL_PAC_H
-
-#include "jemalloc/internal/exp_grow.h"
-#include "jemalloc/internal/pai.h"
-#include "san_bump.h"
-
-
-/*
- * Page allocator classic; an implementation of the PAI interface that:
- * - Can be used for arenas with custom extent hooks.
- * - Can always satisfy any allocation request (including highly-fragmentary
- * ones).
- * - Can use efficient OS-level zeroing primitives for demand-filled pages.
- */
-
-/* How "eager" decay/purging should be. */
-enum pac_purge_eagerness_e {
- PAC_PURGE_ALWAYS,
- PAC_PURGE_NEVER,
- PAC_PURGE_ON_EPOCH_ADVANCE
-};
-typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
-
-typedef struct pac_decay_stats_s pac_decay_stats_t;
-struct pac_decay_stats_s {
- /* Total number of purge sweeps. */
- locked_u64_t npurge;
- /* Total number of madvise calls made. */
- locked_u64_t nmadvise;
- /* Total number of pages purged. */
- locked_u64_t purged;
-};
-
-typedef struct pac_estats_s pac_estats_t;
-struct pac_estats_s {
- /*
- * Stats for a given index in the range [0, SC_NPSIZES] in the various
- * ecache_ts.
- * We track both bytes and # of extents: two extents in the same bucket
- * may have different sizes if adjacent size classes differ by more than
- * a page, so bytes cannot always be derived from # of extents.
- */
- size_t ndirty;
- size_t dirty_bytes;
- size_t nmuzzy;
- size_t muzzy_bytes;
- size_t nretained;
- size_t retained_bytes;
-};
-
-typedef struct pac_stats_s pac_stats_t;
-struct pac_stats_s {
- pac_decay_stats_t decay_dirty;
- pac_decay_stats_t decay_muzzy;
-
- /*
- * Number of unused virtual memory bytes currently retained. Retained
- * bytes are technically mapped (though always decommitted or purged),
- * but they are excluded from the mapped statistic (above).
- */
- size_t retained; /* Derived. */
-
- /*
- * Number of bytes currently mapped, excluding retained memory (and any
- * base-allocated memory, which is tracked by the arena stats).
- *
- * We name this "pac_mapped" to avoid confusion with the arena_stats
- * "mapped".
- */
- atomic_zu_t pac_mapped;
-
- /* VM space had to be leaked (undocumented). Normally 0. */
- atomic_zu_t abandoned_vm;
-};
-
-typedef struct pac_s pac_t;
-struct pac_s {
- /*
- * Must be the first member (we convert it to a PAC given only a
- * pointer). The handle to the allocation interface.
- */
- pai_t pai;
- /*
- * Collections of extents that were previously allocated. These are
- * used when allocating extents, in an attempt to re-use address space.
- *
- * Synchronization: internal.
- */
- ecache_t ecache_dirty;
- ecache_t ecache_muzzy;
- ecache_t ecache_retained;
-
- base_t *base;
- emap_t *emap;
- edata_cache_t *edata_cache;
-
- /* The grow info for the retained ecache. */
- exp_grow_t exp_grow;
- malloc_mutex_t grow_mtx;
-
- /* Special allocator for guarded frequently reused extents. */
- san_bump_alloc_t sba;
-
- /* How large extents should be before getting auto-purged. */
- atomic_zu_t oversize_threshold;
-
- /*
- * Decay-based purging state, responsible for scheduling extent state
- * transitions.
- *
- * Synchronization: via the internal mutex.
- */
- decay_t decay_dirty; /* dirty --> muzzy */
- decay_t decay_muzzy; /* muzzy --> retained */
-
- malloc_mutex_t *stats_mtx;
- pac_stats_t *stats;
-
- /* Extent serial number generator state. */
- atomic_zu_t extent_sn_next;
-};
-
-bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
- edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
- ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
- malloc_mutex_t *stats_mtx);
-
-static inline size_t
-pac_mapped(pac_t *pac) {
- return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
-}
-
-static inline ehooks_t *
-pac_ehooks_get(pac_t *pac) {
- return base_ehooks_get(pac->base);
-}
-
-/*
- * All purging functions require holding decay->mtx. This is one of the few
- * places external modules are allowed to peek inside pa_shard_t internals.
- */
-
-/*
- * Decays the number of pages currently in the ecache. This might not leave the
- * ecache empty if other threads are inserting dirty objects into it
- * concurrently with the call.
- */
-void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
- pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
-/*
- * Updates decay settings for the current time, and conditionally purges in
- * response (depending on decay_purge_setting). Returns whether or not the
- * epoch advanced.
- */
-bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
- pac_decay_stats_t *decay_stats, ecache_t *ecache,
- pac_purge_eagerness_t eagerness);
-
-/*
- * Gets / sets the maximum amount that we'll grow an arena down the
- * grow-retained pathways (unless forced to by an allocation request).
- *
- * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't
- * care about the previous value.
- *
- * Returns true on error (if the new limit is not valid).
- */
-bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
- size_t *new_limit);
-
-bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
- ssize_t decay_ms, pac_purge_eagerness_t eagerness);
-ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
-
-void pac_reset(tsdn_t *tsdn, pac_t *pac);
-void pac_destroy(tsdn_t *tsdn, pac_t *pac);
-
-#endif /* JEMALLOC_INTERNAL_PAC_H */
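
The comment on pac_s's pai field ("must be the first member; we convert it to a PAC given only a pointer") describes the usual C idiom for implementing an interface: the concrete type embeds the interface struct as its first member, so a pointer to the interface can be cast back to the containing type. A standalone sketch of that idiom with invented names:

    #include <stdio.h>

    /* A tiny "interface" and one concrete implementation embedding it first. */
    typedef struct iface_s iface_t;
    struct iface_s {
        void (*hello)(iface_t *self);
    };

    typedef struct impl_s impl_t;
    struct impl_s {
        iface_t iface;  /* Must be first: allows the cast in impl_hello(). */
        int id;
    };

    static void
    impl_hello(iface_t *self) {
        /* Valid because iface is the first member of impl_t. */
        impl_t *impl = (impl_t *)self;
        printf("hello from impl %d\n", impl->id);
    }

    int main(void) {
        impl_t impl = {{impl_hello}, 42};
        iface_t *handle = &impl.iface;
        handle->hello(handle);  /* Prints "hello from impl 42". */
        return 0;
    }
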
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pages.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pages.h
deleted file mode 100644
index ad1f606a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pages.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
-
-/* Page size. LG_PAGE is determined by the configure script. */
-#ifdef PAGE_MASK
-# undef PAGE_MASK
-#endif
-#define PAGE ((size_t)(1U << LG_PAGE))
-#define PAGE_MASK ((size_t)(PAGE - 1))
-/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~PAGE_MASK))
-/* Return the smallest pagesize multiple that is >= s. */
-#define PAGE_CEILING(s) \
- (((s) + PAGE_MASK) & ~PAGE_MASK)
-/* Return the largest pagesize multiple that is <= s. */
-#define PAGE_FLOOR(s) \
- ((s) & ~PAGE_MASK)
-
-/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
-#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
-#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
-
-#if LG_HUGEPAGE != 0
-# define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
-#else
-/*
- * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
- * we can't autodetect the hugepage size, it gets treated as 0, in which case
- * we'll trigger a compiler error in those arrays. Avoid this case by ensuring
- * that this value is at least 1. (We won't ever run in this degraded state;
- * hpa_supported() returns false in this case.)
- */
-# define HUGEPAGE_PAGES 1
-#endif
-
-/* Return the huge page base address for the huge page containing address a. */
-#define HUGEPAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
-/* Return the smallest pagesize multiple that is >= s. */
-#define HUGEPAGE_CEILING(s) \
- (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
-
-/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
-#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define PAGES_CAN_PURGE_LAZY
-#endif
-/*
- * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
- *
- * The only supported way to hard-purge on Windows is to decommit and then
- * re-commit, but doing so is racy, and if re-commit fails it's a pain to
- * propagate the "poisoned" memory state. Since we typically decommit as the
- * next step after purging on Windows anyway, there's no point in adding such
- * complexity.
- */
-#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
- defined(JEMALLOC_MAPS_COALESCE))
-# define PAGES_CAN_PURGE_FORCED
-#endif
-
-static const bool pages_can_purge_lazy =
-#ifdef PAGES_CAN_PURGE_LAZY
- true
-#else
- false
-#endif
- ;
-static const bool pages_can_purge_forced =
-#ifdef PAGES_CAN_PURGE_FORCED
- true
-#else
- false
-#endif
- ;
-
-#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
-# define PAGES_CAN_HUGIFY
-#endif
-
-static const bool pages_can_hugify =
-#ifdef PAGES_CAN_HUGIFY
- true
-#else
- false
-#endif
- ;
-
-typedef enum {
- thp_mode_default = 0, /* Do not change hugepage settings. */
- thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
- thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */
-
- thp_mode_names_limit = 3, /* Used for option processing. */
- thp_mode_not_supported = 3 /* No THP support detected. */
-} thp_mode_t;
-
-#define THP_MODE_DEFAULT thp_mode_default
-extern thp_mode_t opt_thp;
-extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
-extern const char *thp_mode_names[];
-
-void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
-void pages_unmap(void *addr, size_t size);
-bool pages_commit(void *addr, size_t size);
-bool pages_decommit(void *addr, size_t size);
-bool pages_purge_lazy(void *addr, size_t size);
-bool pages_purge_forced(void *addr, size_t size);
-bool pages_huge(void *addr, size_t size);
-bool pages_nohuge(void *addr, size_t size);
-bool pages_dontdump(void *addr, size_t size);
-bool pages_dodump(void *addr, size_t size);
-bool pages_boot(void);
-void pages_set_thp_state(void *ptr, size_t size);
-void pages_mark_guards(void *head, void *tail);
-void pages_unmark_guards(void *head, void *tail);
-
-#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
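
The PAGE_* macros at the top of this header are pure bit arithmetic. A standalone example that copies them with an assumed 4 KiB page size (LG_PAGE = 12 is an assumption for illustration; the real value comes from the configure script):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define LG_PAGE 12                      /* Assume 4 KiB pages. */
    #define PAGE ((size_t)(1U << LG_PAGE))
    #define PAGE_MASK ((size_t)(PAGE - 1))
    #define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~PAGE_MASK))
    #define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)
    #define PAGE_FLOOR(s) ((s) & ~PAGE_MASK)

    int main(void) {
        assert(PAGE_CEILING(5000) == 8192);  /* Round up to the next page. */
        assert(PAGE_FLOOR(5000) == 4096);    /* Round down to a page boundary. */
        assert(PAGE_ADDR2BASE((void *)0x1234567) == (void *)0x1234000);
        return 0;
    }
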
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pai.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pai.h
deleted file mode 100644
index d978cd7d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/pai.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PAI_H
-#define JEMALLOC_INTERNAL_PAI_H
-
-/* An interface for page allocation. */
-
-typedef struct pai_s pai_t;
-struct pai_s {
- /* Returns NULL on failure. */
- edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
- size_t alignment, bool zero, bool guarded, bool frequent_reuse,
- bool *deferred_work_generated);
- /*
- * Returns the number of extents added to the list (which may be fewer
- * than requested, in case of OOM). The list should already be
- * initialized. The only alignment guarantee is page-alignment, and
- * the results are not necessarily zeroed.
- */
- size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
- size_t nallocs, edata_list_active_t *results,
- bool *deferred_work_generated);
- bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
- size_t old_size, size_t new_size, bool zero,
- bool *deferred_work_generated);
- bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
- size_t old_size, size_t new_size, bool *deferred_work_generated);
- void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
- bool *deferred_work_generated);
- /* This function empties out the list as a side-effect of being called. */
- void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
- edata_list_active_t *list, bool *deferred_work_generated);
- uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
-};
-
-/*
- * These are just simple convenience functions to avoid having to reference the
- * same pai_t twice on every invocation.
- */
-
-static inline edata_t *
-pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
- bool zero, bool guarded, bool frequent_reuse,
- bool *deferred_work_generated) {
- return self->alloc(tsdn, self, size, alignment, zero, guarded,
- frequent_reuse, deferred_work_generated);
-}
-
-static inline size_t
-pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
- edata_list_active_t *results, bool *deferred_work_generated) {
- return self->alloc_batch(tsdn, self, size, nallocs, results,
- deferred_work_generated);
-}
-
-static inline bool
-pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
- size_t new_size, bool zero, bool *deferred_work_generated) {
- return self->expand(tsdn, self, edata, old_size, new_size, zero,
- deferred_work_generated);
-}
-
-static inline bool
-pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
- size_t new_size, bool *deferred_work_generated) {
- return self->shrink(tsdn, self, edata, old_size, new_size,
- deferred_work_generated);
-}
-
-static inline void
-pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
- bool *deferred_work_generated) {
- self->dalloc(tsdn, self, edata, deferred_work_generated);
-}
-
-static inline void
-pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
- bool *deferred_work_generated) {
- self->dalloc_batch(tsdn, self, list, deferred_work_generated);
-}
-
-static inline uint64_t
-pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
- return self->time_until_deferred_work(tsdn, self);
-}
-
-/*
- * An implementation of batch allocation that simply calls alloc once for
- * each item in the list.
- */
-size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
- size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
-/* Ditto, for dalloc. */
-void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
- edata_list_active_t *list, bool *deferred_work_generated);
-
-#endif /* JEMALLOC_INTERNAL_PAI_H */
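The trailing declarations note that the default batch allocator simply calls the single-extent alloc hook once per requested item. A hedged sketch of that looping shape follows; the real implementation lives in src/pai.c and may differ in details, and the sketch assumes the jemalloc-internal environment for PAGE and edata_list_active_append().

/*
 * Sketch of pai_alloc_batch_default (illustrative, not authoritative): call
 * alloc once per item, append each result, and stop early on the first
 * failure, returning the number of extents actually allocated.
 */
size_t
pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	for (size_t i = 0; i < nallocs; i++) {
		bool deferred_by_alloc = false;
		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred_by_alloc);
		*deferred_work_generated |= deferred_by_alloc;
		if (edata == NULL) {
			return i; /* number successfully allocated so far */
		}
		edata_list_active_append(results, edata);
	}
	return nallocs;
}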
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak.h
deleted file mode 100644
index 59da3e41..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PEAK_H
-#define JEMALLOC_INTERNAL_PEAK_H
-
-typedef struct peak_s peak_t;
-struct peak_s {
- /* The highest recorded peak value, after adjustment (see below). */
- uint64_t cur_max;
- /*
- * The difference between alloc and dalloc at the last set_zero call;
- * this lets us cancel out the appropriate amount of excess.
- */
- uint64_t adjustment;
-};
-
-#define PEAK_INITIALIZER {0, 0}
-
-static inline uint64_t
-peak_max(peak_t *peak) {
- return peak->cur_max;
-}
-
-static inline void
-peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
- int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
- if (candidate_max > (int64_t)peak->cur_max) {
- peak->cur_max = candidate_max;
- }
-}
-
-/* Resets the counter to zero; all peaks are now relative to this point. */
-static inline void
-peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
- peak->cur_max = 0;
- peak->adjustment = alloc - dalloc;
-}
-
-#endif /* JEMALLOC_INTERNAL_PEAK_H */
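The adjustment field is easiest to see with numbers: after peak_set_zero(), peaks are measured relative to the amount live at that moment, not relative to zero. The following standalone sketch replicates the two inline functions above to show that behavior.

/*
 * Standalone illustration of the peak adjustment logic: a reset makes all
 * future peaks relative to the live size at the reset point.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t cur_max; uint64_t adjustment; } peak_t;

static void peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	int64_t candidate = (int64_t)(alloc - dalloc - peak->adjustment);
	if (candidate > (int64_t)peak->cur_max) {
		peak->cur_max = (uint64_t)candidate;
	}
}

static void peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	peak->cur_max = 0;
	peak->adjustment = alloc - dalloc;
}

int main(void) {
	peak_t peak = {0, 0};
	peak_update(&peak, 100, 20);   /* 80 bytes live -> peak is 80 */
	peak_set_zero(&peak, 100, 20); /* future peaks relative to 80 live */
	peak_update(&peak, 150, 20);   /* 130 live, but only 50 above baseline */
	printf("peak after reset: %llu\n", (unsigned long long)peak.cur_max); /* 50 */
	return 0;
}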
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak_event.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak_event.h
deleted file mode 100644
index b808ce04..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/peak_event.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
-#define JEMALLOC_INTERNAL_PEAK_EVENT_H
-
-/*
- * While peak.h contains the simple helper struct that tracks state, this
- * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
- */
-
-/* Update the peak with current tsd state. */
-void peak_event_update(tsd_t *tsd);
-/* Set current state to zero. */
-void peak_event_zero(tsd_t *tsd);
-uint64_t peak_event_max(tsd_t *tsd);
-
-/* Manual hooks. */
-/* The activity-triggered hooks. */
-uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
-uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
-void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
-uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
-uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
-void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
-
-#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ph.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ph.h
deleted file mode 100644
index 5f091c5f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ph.h
+++ /dev/null
@@ -1,520 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PH_H
-#define JEMALLOC_INTERNAL_PH_H
-
-/*
- * A Pairing Heap implementation.
- *
- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
- *
- * With an auxiliary two-pass list, described in a follow-on paper.
- *
- * "Pairing Heaps: Experiments and Analysis"
- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
- *
- *******************************************************************************
- *
- * We include a non-obvious optimization:
- * - First, we introduce a new pop-and-link operation; pop the two most
- * recently-inserted items off the aux-list, link them, and push the resulting
- * heap.
- * - We maintain a count of the number of insertions since the last time we
- * merged the aux-list (i.e. via first() or remove_first()). After N inserts,
- * we do ffs(N) pop-and-link operations.
- *
- * One way to think of this is that we're progressively building up a tree in
- * the aux-list, rather than a linked-list (think of the series of merges that
- * will be performed as the aux-count grows).
- *
- * There are a couple of reasons we benefit from this:
- * - Ordinarily, after N insertions, the aux-list is of size N. With our
- * strategy, it's of size O(log(N)). So we decrease the worst-case time of
- * first() calls, and reduce the average cost of remove_min calls. Since
- * these almost always occur while holding a lock, we practically reduce the
- * frequency of unusually long hold times.
- * - This moves the bulk of the work of merging the aux-list onto the threads
- * that are inserting into the heap. In some common scenarios, insertions
- * happen in bulk, from a single thread (think tcache flushing; we potentially
- * move many slabs from slabs_full to slabs_nonfull). All the nodes in this
- * case are in the inserting thread's cache, and linking them is very cheap
- * (cache misses dominate linking cost). Without this optimization, linking
- * happens on the next call to remove_first. Since that remove_first call
- * likely happens on a different thread (or at least, after the cache has
- * gotten cold if done on the same thread), deferring linking trades cheap
- * link operations now for expensive ones later.
- *
- * The ffs trick keeps amortized insert cost at constant time. Similar
- * strategies based on periodically sorting the list after a batch of operations
- * perform worse than this in practice, even with various fancy tricks; they
- * all raise the amortized complexity of an insert from O(1) to O(log(n)).
- */
-
-typedef int (*ph_cmp_t)(void *, void *);
-
-/* Node structure. */
-typedef struct phn_link_s phn_link_t;
-struct phn_link_s {
- void *prev;
- void *next;
- void *lchild;
-};
-
-typedef struct ph_s ph_t;
-struct ph_s {
- void *root;
- /*
- * Inserts done since the last aux-list merge. This is not necessarily
- * the size of the aux-list, since it's possible that removals have
- * happened since, and we don't track whether or not those removals are
- * from the aux list.
- */
- size_t auxcount;
-};
-
-JEMALLOC_ALWAYS_INLINE phn_link_t *
-phn_link_get(void *phn, size_t offset) {
- return (phn_link_t *)(((uintptr_t)phn) + offset);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-phn_link_init(void *phn, size_t offset) {
- phn_link_get(phn, offset)->prev = NULL;
- phn_link_get(phn, offset)->next = NULL;
- phn_link_get(phn, offset)->lchild = NULL;
-}
-
-/* Internal utility helpers. */
-JEMALLOC_ALWAYS_INLINE void *
-phn_lchild_get(void *phn, size_t offset) {
- return phn_link_get(phn, offset)->lchild;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-phn_lchild_set(void *phn, void *lchild, size_t offset) {
- phn_link_get(phn, offset)->lchild = lchild;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-phn_next_get(void *phn, size_t offset) {
- return phn_link_get(phn, offset)->next;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-phn_next_set(void *phn, void *next, size_t offset) {
- phn_link_get(phn, offset)->next = next;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-phn_prev_get(void *phn, size_t offset) {
- return phn_link_get(phn, offset)->prev;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-phn_prev_set(void *phn, void *prev, size_t offset) {
- phn_link_get(phn, offset)->prev = prev;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-phn_merge_ordered(void *phn0, void *phn1, size_t offset,
- ph_cmp_t cmp) {
- void *phn0child;
-
- assert(phn0 != NULL);
- assert(phn1 != NULL);
- assert(cmp(phn0, phn1) <= 0);
-
- phn_prev_set(phn1, phn0, offset);
- phn0child = phn_lchild_get(phn0, offset);
- phn_next_set(phn1, phn0child, offset);
- if (phn0child != NULL) {
- phn_prev_set(phn0child, phn1, offset);
- }
- phn_lchild_set(phn0, phn1, offset);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
- void *result;
- if (phn0 == NULL) {
- result = phn1;
- } else if (phn1 == NULL) {
- result = phn0;
- } else if (cmp(phn0, phn1) < 0) {
- phn_merge_ordered(phn0, phn1, offset, cmp);
- result = phn0;
- } else {
- phn_merge_ordered(phn1, phn0, offset, cmp);
- result = phn1;
- }
- return result;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
- void *head = NULL;
- void *tail = NULL;
- void *phn0 = phn;
- void *phn1 = phn_next_get(phn0, offset);
-
- /*
- * Multipass merge, wherein the first two elements of a FIFO
- * are repeatedly merged, and each result is appended to the
- * singly linked FIFO, until the FIFO contains only a single
- * element. We start with a sibling list but no reference to
- * its tail, so we do a single pass over the sibling list to
- * populate the FIFO.
- */
- if (phn1 != NULL) {
- void *phnrest = phn_next_get(phn1, offset);
- if (phnrest != NULL) {
- phn_prev_set(phnrest, NULL, offset);
- }
- phn_prev_set(phn0, NULL, offset);
- phn_next_set(phn0, NULL, offset);
- phn_prev_set(phn1, NULL, offset);
- phn_next_set(phn1, NULL, offset);
- phn0 = phn_merge(phn0, phn1, offset, cmp);
- head = tail = phn0;
- phn0 = phnrest;
- while (phn0 != NULL) {
- phn1 = phn_next_get(phn0, offset);
- if (phn1 != NULL) {
- phnrest = phn_next_get(phn1, offset);
- if (phnrest != NULL) {
- phn_prev_set(phnrest, NULL, offset);
- }
- phn_prev_set(phn0, NULL, offset);
- phn_next_set(phn0, NULL, offset);
- phn_prev_set(phn1, NULL, offset);
- phn_next_set(phn1, NULL, offset);
- phn0 = phn_merge(phn0, phn1, offset, cmp);
- phn_next_set(tail, phn0, offset);
- tail = phn0;
- phn0 = phnrest;
- } else {
- phn_next_set(tail, phn0, offset);
- tail = phn0;
- phn0 = NULL;
- }
- }
- phn0 = head;
- phn1 = phn_next_get(phn0, offset);
- if (phn1 != NULL) {
- while (true) {
- head = phn_next_get(phn1, offset);
- assert(phn_prev_get(phn0, offset) == NULL);
- phn_next_set(phn0, NULL, offset);
- assert(phn_prev_get(phn1, offset) == NULL);
- phn_next_set(phn1, NULL, offset);
- phn0 = phn_merge(phn0, phn1, offset, cmp);
- if (head == NULL) {
- break;
- }
- phn_next_set(tail, phn0, offset);
- tail = phn0;
- phn0 = head;
- phn1 = phn_next_get(phn0, offset);
- }
- }
- }
- return phn0;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
- ph->auxcount = 0;
- void *phn = phn_next_get(ph->root, offset);
- if (phn != NULL) {
- phn_prev_set(ph->root, NULL, offset);
- phn_next_set(ph->root, NULL, offset);
- phn_prev_set(phn, NULL, offset);
- phn = phn_merge_siblings(phn, offset, cmp);
- assert(phn_next_get(phn, offset) == NULL);
- ph->root = phn_merge(ph->root, phn, offset, cmp);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
- void *result;
- void *lchild = phn_lchild_get(phn, offset);
- if (lchild == NULL) {
- result = NULL;
- } else {
- result = phn_merge_siblings(lchild, offset, cmp);
- }
- return result;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-ph_new(ph_t *ph) {
- ph->root = NULL;
- ph->auxcount = 0;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-ph_empty(ph_t *ph) {
- return ph->root == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
- if (ph->root == NULL) {
- return NULL;
- }
- ph_merge_aux(ph, offset, cmp);
- return ph->root;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ph_any(ph_t *ph, size_t offset) {
- if (ph->root == NULL) {
- return NULL;
- }
- void *aux = phn_next_get(ph->root, offset);
- if (aux != NULL) {
- return aux;
- }
- return ph->root;
-}
-
-/* Returns true if we should stop trying to merge. */
-JEMALLOC_ALWAYS_INLINE bool
-ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
- assert(ph->root != NULL);
- void *phn0 = phn_next_get(ph->root, offset);
- if (phn0 == NULL) {
- return true;
- }
- void *phn1 = phn_next_get(phn0, offset);
- if (phn1 == NULL) {
- return true;
- }
- void *next_phn1 = phn_next_get(phn1, offset);
- phn_next_set(phn0, NULL, offset);
- phn_prev_set(phn0, NULL, offset);
- phn_next_set(phn1, NULL, offset);
- phn_prev_set(phn1, NULL, offset);
- phn0 = phn_merge(phn0, phn1, offset, cmp);
- phn_next_set(phn0, next_phn1, offset);
- if (next_phn1 != NULL) {
- phn_prev_set(next_phn1, phn0, offset);
- }
- phn_next_set(ph->root, phn0, offset);
- phn_prev_set(phn0, ph->root, offset);
- return next_phn1 == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
- phn_link_init(phn, offset);
-
- /*
- * Treat the root as an aux list during insertion, and lazily merge
- * during a_prefix##remove_first(). For elements that are inserted,
- * then removed via a_prefix##remove() before the aux list is ever
- * processed, this makes insert/remove constant-time, whereas eager
- * merging would make insert O(log n).
- */
- if (ph->root == NULL) {
- ph->root = phn;
- } else {
- /*
- * As a special case, check to see if we can replace the root.
- * This is common in practice in some important cases, and lets
- * us defer some insertions (hopefully, until the point where
- * some of the items in the aux list have been removed, saving
- * us from linking them at all).
- */
- if (cmp(phn, ph->root) < 0) {
- phn_lchild_set(phn, ph->root, offset);
- phn_prev_set(ph->root, phn, offset);
- ph->root = phn;
- ph->auxcount = 0;
- return;
- }
- ph->auxcount++;
- phn_next_set(phn, phn_next_get(ph->root, offset), offset);
- if (phn_next_get(ph->root, offset) != NULL) {
- phn_prev_set(phn_next_get(ph->root, offset), phn,
- offset);
- }
- phn_prev_set(phn, ph->root, offset);
- phn_next_set(ph->root, phn, offset);
- }
- if (ph->auxcount > 1) {
- unsigned nmerges = ffs_zu(ph->auxcount - 1);
- bool done = false;
- for (unsigned i = 0; i < nmerges && !done; i++) {
- done = ph_try_aux_merge_pair(ph, offset, cmp);
- }
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
- void *ret;
-
- if (ph->root == NULL) {
- return NULL;
- }
- ph_merge_aux(ph, offset, cmp);
- ret = ph->root;
- ph->root = ph_merge_children(ph->root, offset, cmp);
-
- return ret;
-
-}
-
-JEMALLOC_ALWAYS_INLINE void
-ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
- void *replace;
- void *parent;
-
- if (ph->root == phn) {
- /*
- * We can delete from aux list without merging it, but we need
- * to merge if we are dealing with the root node and it has
- * children.
- */
- if (phn_lchild_get(phn, offset) == NULL) {
- ph->root = phn_next_get(phn, offset);
- if (ph->root != NULL) {
- phn_prev_set(ph->root, NULL, offset);
- }
- return;
- }
- ph_merge_aux(ph, offset, cmp);
- if (ph->root == phn) {
- ph->root = ph_merge_children(ph->root, offset, cmp);
- return;
- }
- }
-
- /* Get parent (if phn is leftmost child) before mutating. */
- if ((parent = phn_prev_get(phn, offset)) != NULL) {
- if (phn_lchild_get(parent, offset) != phn) {
- parent = NULL;
- }
- }
- /* Find a possible replacement node, and link to parent. */
- replace = ph_merge_children(phn, offset, cmp);
- /* Set next/prev for sibling linked list. */
- if (replace != NULL) {
- if (parent != NULL) {
- phn_prev_set(replace, parent, offset);
- phn_lchild_set(parent, replace, offset);
- } else {
- phn_prev_set(replace, phn_prev_get(phn, offset),
- offset);
- if (phn_prev_get(phn, offset) != NULL) {
- phn_next_set(phn_prev_get(phn, offset), replace,
- offset);
- }
- }
- phn_next_set(replace, phn_next_get(phn, offset), offset);
- if (phn_next_get(phn, offset) != NULL) {
- phn_prev_set(phn_next_get(phn, offset), replace,
- offset);
- }
- } else {
- if (parent != NULL) {
- void *next = phn_next_get(phn, offset);
- phn_lchild_set(parent, next, offset);
- if (next != NULL) {
- phn_prev_set(next, parent, offset);
- }
- } else {
- assert(phn_prev_get(phn, offset) != NULL);
- phn_next_set(
- phn_prev_get(phn, offset),
- phn_next_get(phn, offset), offset);
- }
- if (phn_next_get(phn, offset) != NULL) {
- phn_prev_set(
- phn_next_get(phn, offset),
- phn_prev_get(phn, offset), offset);
- }
- }
-}
-
-#define ph_structs(a_prefix, a_type) \
-typedef struct { \
- phn_link_t link; \
-} a_prefix##_link_t; \
- \
-typedef struct { \
- ph_t ph; \
-} a_prefix##_t;
-
-/*
- * The ph_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to ph_gen().
- */
-#define ph_proto(a_attr, a_prefix, a_type) \
- \
-a_attr void a_prefix##_new(a_prefix##_t *ph); \
-a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
-a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
-a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
-a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
-a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
-a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
-a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);
-
-/* The ph_gen() macro generates a type-specific pairing heap implementation. */
-#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
-JEMALLOC_ALWAYS_INLINE int \
-a_prefix##_ph_cmp(void *a, void *b) { \
- return a_cmp((a_type *)a, (a_type *)b); \
-} \
- \
-a_attr void \
-a_prefix##_new(a_prefix##_t *ph) { \
- ph_new(&ph->ph); \
-} \
- \
-a_attr bool \
-a_prefix##_empty(a_prefix##_t *ph) { \
- return ph_empty(&ph->ph); \
-} \
- \
-a_attr a_type * \
-a_prefix##_first(a_prefix##_t *ph) { \
- return ph_first(&ph->ph, offsetof(a_type, a_field), \
- &a_prefix##_ph_cmp); \
-} \
- \
-a_attr a_type * \
-a_prefix##_any(a_prefix##_t *ph) { \
- return ph_any(&ph->ph, offsetof(a_type, a_field)); \
-} \
- \
-a_attr void \
-a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
- ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
- a_prefix##_ph_cmp); \
-} \
- \
-a_attr a_type * \
-a_prefix##_remove_first(a_prefix##_t *ph) { \
- return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \
- a_prefix##_ph_cmp); \
-} \
- \
-a_attr void \
-a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
- ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
- a_prefix##_ph_cmp); \
-} \
- \
-a_attr a_type * \
-a_prefix##_remove_any(a_prefix##_t *ph) { \
- a_type *ret = a_prefix##_any(ph); \
- if (ret != NULL) { \
- a_prefix##_remove(ph, ret); \
- } \
- return ret; \
-}
-
-#endif /* JEMALLOC_INTERNAL_PH_H */
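The macros at the end are the intended consumer interface. A hypothetical usage sketch follows, keyed on a made-up node type; it assumes the jemalloc-internal build environment (jemalloc_preamble.h / jemalloc_internal_includes.h already included) so that ph.h's own dependencies such as JEMALLOC_ALWAYS_INLINE and ffs_zu are in scope. The node type, field names, and comparator are illustrative.

/*
 * Hypothetical pairing-heap instantiation: an intrusive heap of mynode_t
 * ordered by a priority field. ph_structs() declares the wrapper types,
 * ph_proto() the prototypes, and ph_gen() the type-specific functions.
 */
typedef struct mynode_s mynode_t;
ph_structs(mynode_heap, mynode_t)

struct mynode_s {
	uint64_t prio;
	mynode_heap_link_t heap_link; /* intrusive link; named via a_field */
};

static int
mynode_prio_cmp(mynode_t *a, mynode_t *b) {
	return (a->prio > b->prio) - (a->prio < b->prio);
}

ph_proto(static, mynode_heap, mynode_t)
ph_gen(static, mynode_heap, mynode_t, heap_link, mynode_prio_cmp)

/*
 * Typical use: mynode_heap_new(&heap); mynode_heap_insert(&heap, node);
 * mynode_t *min = mynode_heap_remove_first(&heap);
 */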
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_namespace.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_namespace.sh
deleted file mode 100755
index 6ef1346a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_namespace.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-for symbol in `cat "$@"` ; do
- echo "#define ${symbol} JEMALLOC_N(${symbol})"
-done
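For a symbol list containing, say, arena_init and tcache_flush (illustrative names, not necessarily real entries in private_symbols.txt), the script emits one mangling define per symbol:

#define arena_init JEMALLOC_N(arena_init)
#define tcache_flush JEMALLOC_N(tcache_flush)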
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_symbols.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_symbols.sh
deleted file mode 100755
index 442a259f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/private_symbols.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/sh
-#
-# Generate private_symbols[_jet].awk.
-#
-# Usage: private_symbols.sh <sym_prefix> <sym>*
-#
-# <sym_prefix> is typically "" or "_".
-
-sym_prefix=$1
-shift
-
-cat <<EOF
-#!/usr/bin/env awk -f
-
-BEGIN {
- sym_prefix = "${sym_prefix}"
- split("\\
-EOF
-
-for public_sym in "$@" ; do
- cat <<EOF
- ${sym_prefix}${public_sym} \\
-EOF
-done
-
-cat <<"EOF"
- ", exported_symbol_names)
- # Store exported symbol names as keys in exported_symbols.
- for (i in exported_symbol_names) {
- exported_symbols[exported_symbol_names[i]] = 1
- }
-}
-
-# Process 'nm -a <c_source.o>' output.
-#
-# Handle lines like:
-# 0000000000000008 D opt_junk
-# 0000000000007574 T malloc_initialized
-(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
- print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
-}
-
-# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
-#
-# Handle lines like:
-# 353 00008098 SECT4 notype External | opt_junk
-# 3F1 00000000 SECT7 notype () External | malloc_initialized
-($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
- print $NF
-}
-EOF
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prng.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prng.h
deleted file mode 100644
index 14542aa1..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prng.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PRNG_H
-#define JEMALLOC_INTERNAL_PRNG_H
-
-#include "jemalloc/internal/bit_util.h"
-
-/*
- * Simple linear congruential pseudo-random number generator:
- *
- * prng(x) = (a*x + c) % m
- *
- * where the following constants ensure maximal period:
- *
- * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
- * c == Odd number (relatively prime to 2^n).
- * m == 2^32
- *
- * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
- *
- * This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position. For example, the lowest bit has a cycle of 2,
- * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
- * bits.
- */
-
-/******************************************************************************/
-/* INTERNAL DEFINITIONS -- IGNORE */
-/******************************************************************************/
-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
-
-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state) {
- return (state * PRNG_A_32) + PRNG_C_32;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state) {
- return (state * PRNG_A_64) + PRNG_C_64;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state) {
-#if LG_SIZEOF_PTR == 2
- return (state * PRNG_A_32) + PRNG_C_32;
-#elif LG_SIZEOF_PTR == 3
- return (state * PRNG_A_64) + PRNG_C_64;
-#else
-#error Unsupported pointer size
-#endif
-}
-
-/******************************************************************************/
-/* BEGIN PUBLIC API */
-/******************************************************************************/
-
-/*
- * The prng_lg_range functions give a uniform int in the half-open range [0,
- * 2**lg_range).
- */
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
- assert(lg_range > 0);
- assert(lg_range <= 32);
-
- *state = prng_state_next_u32(*state);
- uint32_t ret = *state >> (32 - lg_range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
- assert(lg_range > 0);
- assert(lg_range <= 64);
-
- *state = prng_state_next_u64(*state);
- uint64_t ret = *state >> (64 - lg_range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range) {
- assert(lg_range > 0);
- assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
-
- *state = prng_state_next_zu(*state);
- size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
- return ret;
-}
-
-/*
- * The prng_range functions behave like the prng_lg_range, but return a result
- * in [0, range) instead of [0, 2**lg_range).
- */
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range) {
- assert(range != 0);
- /*
- * If range were 1, lg_range would be 0, so the shift in
- * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
- * which is UB. Just handle this case as a one-off.
- */
- if (range == 1) {
- return 0;
- }
-
- /* Compute the ceiling of lg(range). */
- unsigned lg_range = ffs_u32(pow2_ceil_u32(range));
-
- /* Generate a result in [0..range) via repeated trial. */
- uint32_t ret;
- do {
- ret = prng_lg_range_u32(state, lg_range);
- } while (ret >= range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range) {
- assert(range != 0);
-
- /* See the note in prng_range_u32. */
- if (range == 1) {
- return 0;
- }
-
- /* Compute the ceiling of lg(range). */
- unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
-
- /* Generate a result in [0..range) via repeated trial. */
- uint64_t ret;
- do {
- ret = prng_lg_range_u64(state, lg_range);
- } while (ret >= range);
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range) {
- assert(range != 0);
-
- /* See the note in prng_range_u32. */
- if (range == 1) {
- return 0;
- }
-
- /* Compute the ceiling of lg(range). */
- unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
-
- /* Generate a result in [0..range) via repeated trial. */
- size_t ret;
- do {
- ret = prng_lg_range_zu(state, lg_range);
- } while (ret >= range);
-
- return ret;
-}
-
-#endif /* JEMALLOC_INTERNAL_PRNG_H */
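The two ideas in this header, taking the upper bits of the LCG state and rejecting out-of-range draws, can be shown in a standalone sketch that mirrors prng_lg_range_u64 / prng_range_u64 with the same constants. This is an illustration of the scheme, not jemalloc's code, and it recomputes the lg-ceiling with a plain loop rather than ffs_u64/pow2_ceil_u64.

/*
 * Standalone illustration: 64-bit LCG, bounded results taken from the
 * higher-quality upper bits, rejection sampling to avoid modulo bias.
 */
#include <stdint.h>
#include <stdio.h>

#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

static uint64_t lcg_next(uint64_t state) {
	return state * PRNG_A_64 + PRNG_C_64;
}

/* Uniform value in [0, 2**lg_range), taken from the upper bits. */
static uint64_t lcg_lg_range(uint64_t *state, unsigned lg_range) {
	*state = lcg_next(*state);
	return *state >> (64 - lg_range);
}

/* Uniform value in [0, range) via repeated trial. */
static uint64_t lcg_range(uint64_t *state, uint64_t range) {
	if (range == 1) {
		return 0; /* avoid a 64-bit shift by 64, as the header notes */
	}
	unsigned lg_range = 64;
	while (((uint64_t)1 << (lg_range - 1)) >= range) {
		lg_range--; /* ceiling of lg(range) */
	}
	uint64_t ret;
	do {
		ret = lcg_lg_range(state, lg_range);
	} while (ret >= range);
	return ret;
}

int main(void) {
	uint64_t state = 42;
	for (int i = 0; i < 5; i++) {
		printf("%llu ", (unsigned long long)lcg_range(&state, 10));
	}
	printf("\n");
	return 0;
}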
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_data.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_data.h
deleted file mode 100644
index 4c8e22c7..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_data.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
-#define JEMALLOC_INTERNAL_PROF_DATA_H
-
-#include "jemalloc/internal/mutex.h"
-
-extern malloc_mutex_t bt2gctx_mtx;
-extern malloc_mutex_t tdatas_mtx;
-extern malloc_mutex_t prof_dump_mtx;
-
-extern malloc_mutex_t *gctx_locks;
-extern malloc_mutex_t *tdata_locks;
-
-extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
-extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
-
-void prof_bt_hash(const void *key, size_t r_hash[2]);
-bool prof_bt_keycomp(const void *k1, const void *k2);
-
-bool prof_data_init(tsd_t *tsd);
-prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
-char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
-int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
-void prof_unbias_map_init();
-void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
- prof_tdata_t *tdata, bool leakcheck);
-prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
- uint64_t thr_discrim, char *thread_name, bool active);
-void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
-void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-
-/* Used in unit tests. */
-size_t prof_tdata_count(void);
-size_t prof_bt_count(void);
-void prof_cnt_all(prof_cnt_t *cnt_all);
-
-#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_externs.h
deleted file mode 100644
index bdff1349..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_externs.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
-#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/prof_hook.h"
-
-extern bool opt_prof;
-extern bool opt_prof_active;
-extern bool opt_prof_thread_active_init;
-extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
-extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
-extern bool opt_prof_gdump; /* High-water memory dumping. */
-extern bool opt_prof_final; /* Final profile dumping. */
-extern bool opt_prof_leak; /* Dump leak summary at exit. */
-extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */
-extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern bool opt_prof_log; /* Turn logging on at boot. */
-extern char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-extern bool opt_prof_unbias;
-
-/* For recording recent allocations */
-extern ssize_t opt_prof_recent_alloc_max;
-
-/* Whether to use the thread name provided by the system or by mallctl. */
-extern bool opt_prof_sys_thread_name;
-
-/* Whether to record per size class counts and request size totals. */
-extern bool opt_prof_stats;
-
-/* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active_state;
-
-/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
-extern bool prof_gdump_val;
-
-/* Profile dump interval, measured in bytes allocated. */
-extern uint64_t prof_interval;
-
-/*
- * Initialized as opt_lg_prof_sample, and potentially modified during profiling
- * resets.
- */
-extern size_t lg_prof_sample;
-
-extern bool prof_booted;
-
-void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
-prof_backtrace_hook_t prof_backtrace_hook_get();
-
-void prof_dump_hook_set(prof_dump_hook_t hook);
-prof_dump_hook_t prof_dump_hook_get();
-
-/* Functions only accessed in prof_inlines.h */
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
-prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-
-void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
-void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
- size_t usize, prof_tctx_t *tctx);
-void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
-prof_tctx_t *prof_tctx_create(tsd_t *tsd);
-void prof_idump(tsdn_t *tsdn);
-bool prof_mdump(tsd_t *tsd, const char *filename);
-void prof_gdump(tsdn_t *tsdn);
-
-void prof_tdata_cleanup(tsd_t *tsd);
-bool prof_active_get(tsdn_t *tsdn);
-bool prof_active_set(tsdn_t *tsdn, bool active);
-const char *prof_thread_name_get(tsd_t *tsd);
-int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(tsd_t *tsd);
-bool prof_thread_active_set(tsd_t *tsd, bool active);
-bool prof_thread_active_init_get(tsdn_t *tsdn);
-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool prof_gdump_get(tsdn_t *tsdn);
-bool prof_gdump_set(tsdn_t *tsdn, bool active);
-void prof_boot0(void);
-void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd, base_t *base);
-void prof_prefork0(tsdn_t *tsdn);
-void prof_prefork1(tsdn_t *tsdn);
-void prof_postfork_parent(tsdn_t *tsdn);
-void prof_postfork_child(tsdn_t *tsdn);
-
-/* Only accessed by thread event. */
-uint64_t prof_sample_new_event_wait(tsd_t *tsd);
-uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
-void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);
-
-#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_hook.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_hook.h
deleted file mode 100644
index 150d19d3..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_hook.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
-#define JEMALLOC_INTERNAL_PROF_HOOK_H
-
-/*
- * The hooks types of which are declared in this file are experimental and
- * undocumented, thus the typedefs are located in an 'internal' header.
- */
-
-/*
- * A hook to mock out backtrace functionality. This can be handy, since it's
- * otherwise difficult to guarantee that two allocations are reported as coming
- * from the exact same stack trace in the presence of an optimizing compiler.
- */
-typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);
-
-/*
- * A callback hook that notifies about a recently dumped heap profile.
- */
-typedef void (*prof_dump_hook_t)(const char *filename);
-
-#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
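A hypothetical hook matching prof_backtrace_hook_t makes the mocking use case concrete. The reading of the second and third parameters as an output length and a frame cap follows the prof_bt_t layout but is an assumption here; the fake frame addresses are fabricated deliberately so two allocations in a test report the identical stack.

/*
 * Hypothetical backtrace hook (assumed parameter meaning: vector, out-length,
 * max frames): record two stable fake "PCs" regardless of optimization level.
 */
static void
test_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
	static int frame0, frame1; /* stable, distinct fake return addresses */
	unsigned n = 0;
	if (n < max_len) vec[n++] = &frame0;
	if (n < max_len) vec[n++] = &frame1;
	*len = n;
}

A test would then install it through prof_backtrace_hook_set(), declared in prof_externs.h above.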
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_inlines.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_inlines.h
deleted file mode 100644
index a8e7e7fb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_inlines.h
+++ /dev/null
@@ -1,261 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_H
-
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/thread_event.h"
-
-JEMALLOC_ALWAYS_INLINE void
-prof_active_assert() {
- cassert(config_prof);
- /*
- * If opt_prof is off, then prof_active must always be off, regardless
- * of whether prof_active_mtx is in effect or not.
- */
- assert(opt_prof || !prof_active_state);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void) {
- prof_active_assert();
- /*
- * Even if opt_prof is true, sampling can be temporarily disabled by
- * setting prof_active to false. No locking is used when reading
- * prof_active in the fast path, so there are no guarantees regarding
- * how long it will take for all threads to notice state changes.
- */
- return prof_active_state;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void) {
- /*
- * No locking is used when reading prof_gdump_val in the fast path, so
- * there are no guarantees regarding how long it will take for all
- * threads to notice state changes.
- */
- return prof_gdump_val;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- tdata = tsd_prof_tdata_get(tsd);
- if (create) {
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if (unlikely(tdata == NULL)) {
- if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
- tsd_prof_tdata_set(tsd, tdata);
- }
- } else if (unlikely(tdata->expired)) {
- tdata = prof_tdata_reinit(tsd, tdata);
- tsd_prof_tdata_set(tsd, tdata);
- }
- assert(tdata == NULL || tdata->attached);
- }
-
- return tdata;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
- prof_info_t *prof_info) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(prof_info != NULL);
-
- arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
- emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(prof_info != NULL);
-
- arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_reset_sampled(tsd, ptr);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
- cassert(config_prof);
- assert(edata != NULL);
- assert((uintptr_t)tctx > (uintptr_t)1U);
-
- arena_prof_info_set(tsd, edata, tctx, size);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
- cassert(config_prof);
-
- /* Fastpath: no need to load tdata */
- if (likely(!sample_event)) {
- return true;
- }
-
- /*
- * sample_event is always obtained from the thread event module, and
- * whenever it's true, it means that the thread event module has
- * already checked the reentrancy level.
- */
- assert(tsd_reentrancy_level_get(tsd) == 0);
-
- prof_tdata_t *tdata = prof_tdata_get(tsd, true);
- if (unlikely(tdata == NULL)) {
- return true;
- }
-
- return !tdata->active;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
- prof_tctx_t *ret;
-
- if (!prof_active ||
- likely(prof_sample_should_skip(tsd, sample_event))) {
- ret = (prof_tctx_t *)(uintptr_t)1U;
- } else {
- ret = prof_tctx_create(tsd);
- }
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
- emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
- prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
- } else {
- prof_tctx_reset(tsd, ptr, alloc_ctx);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
- prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
- prof_info_t *old_prof_info, bool sample_event) {
- bool sampled, old_sampled, moved;
-
- cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
-
- if (prof_active && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- if (prof_sample_should_skip(tsd, sample_event)) {
- /*
- * Don't sample. The usize passed to prof_alloc_prep()
- * was larger than what actually got allocated, so a
- * backtrace was captured for this allocation, even
- * though its actual usize was insufficient to cross the
- * sample threshold.
- */
- prof_alloc_rollback(tsd, tctx);
- tctx = (prof_tctx_t *)(uintptr_t)1U;
- }
- }
-
- sampled = ((uintptr_t)tctx > (uintptr_t)1U);
- old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
- moved = (ptr != old_ptr);
-
- if (unlikely(sampled)) {
- prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
- } else if (moved) {
- prof_tctx_reset(tsd, ptr, NULL);
- } else if (unlikely(old_sampled)) {
- /*
- * prof_tctx_reset() would work for the !moved case as well,
- * but prof_tctx_reset_sampled() is slightly cheaper, and the
- * proper thing to do here in the presence of explicit
- * knowledge re: moved state.
- */
- prof_tctx_reset_sampled(tsd, ptr);
- } else {
- prof_info_t prof_info;
- prof_info_get(tsd, ptr, NULL, &prof_info);
- assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
- }
-
- /*
- * The prof_free_sampled_object() call must come after the
- * prof_malloc_sample_object() call, because tctx and old_tctx may be
- * the same, in which case reversing the call order could cause the tctx
- * to be prematurely destroyed as a side effect of momentarily zeroed
- * counters.
- */
- if (unlikely(old_sampled)) {
- prof_free_sampled_object(tsd, old_usize, old_prof_info);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prof_sample_align(size_t orig_align) {
- /*
- * Enforce page alignment, so that sampled allocations can be identified
- * w/o metadata lookup.
- */
- assert(opt_prof);
- return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
- orig_align;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_aligned(const void *ptr) {
- return ((uintptr_t)ptr & PAGE_MASK) == 0;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sampled(tsd_t *tsd, const void *ptr) {
- prof_info_t prof_info;
- prof_info_get(tsd, ptr, NULL, &prof_info);
- bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
- if (sampled) {
- assert(prof_sample_aligned(ptr));
- }
- return sampled;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const void *ptr, size_t usize,
- emap_alloc_ctx_t *alloc_ctx) {
- prof_info_t prof_info;
- prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
-
- cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
-
- if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
- assert(prof_sample_aligned(ptr));
- prof_free_sampled_object(tsd, usize, &prof_info);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
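The prof_sample_align()/prof_sample_aligned() pair encodes "sampled" into the pointer itself: because sampled allocations are forced to PAGE alignment, the free path can rule out sampling with a single mask, no metadata lookup. A standalone sketch of that check follows; the 4 KiB page size is an assumption of the example, not of jemalloc.

/*
 * Standalone illustration of the alignment trick: a page-unaligned pointer
 * is definitely not a sampled allocation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE      ((uintptr_t)4096)
#define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE - 1)

static bool example_sample_aligned(const void *ptr) {
	return ((uintptr_t)ptr & EXAMPLE_PAGE_MASK) == 0;
}

int main(void) {
	printf("%d\n", example_sample_aligned((void *)(uintptr_t)0x42000)); /* 1: could be sampled */
	printf("%d\n", example_sample_aligned((void *)(uintptr_t)0x42010)); /* 0: definitely not */
	return 0;
}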
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_log.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_log.h
deleted file mode 100644
index ccb557dd..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_log.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
-#define JEMALLOC_INTERNAL_PROF_LOG_H
-
-#include "jemalloc/internal/mutex.h"
-
-extern malloc_mutex_t log_mtx;
-
-void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
-bool prof_log_init(tsd_t *tsdn);
-
-/* Used in unit tests. */
-size_t prof_log_bt_count(void);
-size_t prof_log_alloc_count(void);
-size_t prof_log_thr_count(void);
-bool prof_log_is_logging(void);
-bool prof_log_rep_check(void);
-void prof_log_dummy_set(bool new_value);
-
-bool prof_log_start(tsdn_t *tsdn, const char *filename);
-bool prof_log_stop(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_recent.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_recent.h
deleted file mode 100644
index df410236..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_recent.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H
-#define JEMALLOC_INTERNAL_PROF_RECENT_H
-
-extern malloc_mutex_t prof_recent_alloc_mtx;
-extern malloc_mutex_t prof_recent_dump_mtx;
-
-bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
-void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
-void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
-bool prof_recent_init();
-void edata_prof_recent_alloc_init(edata_t *edata);
-
-/* Used in unit tests. */
-typedef ql_head(prof_recent_t) prof_recent_list_t;
-extern prof_recent_list_t prof_recent_alloc_list;
-edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
-prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
-
-ssize_t prof_recent_alloc_max_ctl_read();
-ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
-void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);
-
-#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_stats.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_stats.h
deleted file mode 100644
index 7954e82d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_stats.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_STATS_H
-#define JEMALLOC_INTERNAL_PROF_STATS_H
-
-typedef struct prof_stats_s prof_stats_t;
-struct prof_stats_s {
- uint64_t req_sum;
- uint64_t count;
-};
-
-extern malloc_mutex_t prof_stats_mtx;
-
-void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size);
-void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size);
-void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
-void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
-
-#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_structs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_structs.h
deleted file mode 100644
index dd22115f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_structs.h
+++ /dev/null
@@ -1,221 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
-#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
-
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/edata.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/rb.h"
-
-struct prof_bt_s {
- /* Backtrace, stored as len program counters. */
- void **vec;
- unsigned len;
-};
-
-#ifdef JEMALLOC_PROF_LIBGCC
-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
-typedef struct {
- void **vec;
- unsigned *len;
- unsigned max;
-} prof_unwind_data_t;
-#endif
-
-struct prof_cnt_s {
- /* Profiling counters. */
- uint64_t curobjs;
- uint64_t curobjs_shifted_unbiased;
- uint64_t curbytes;
- uint64_t curbytes_unbiased;
- uint64_t accumobjs;
- uint64_t accumobjs_shifted_unbiased;
- uint64_t accumbytes;
- uint64_t accumbytes_unbiased;
-};
-
-typedef enum {
- prof_tctx_state_initializing,
- prof_tctx_state_nominal,
- prof_tctx_state_dumping,
- prof_tctx_state_purgatory /* Dumper must finish destroying. */
-} prof_tctx_state_t;
-
-struct prof_tctx_s {
- /* Thread data for thread that performed the allocation. */
- prof_tdata_t *tdata;
-
- /*
- * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
- * defunct during teardown.
- */
- uint64_t thr_uid;
- uint64_t thr_discrim;
-
- /*
- * Reference count of how many times this tctx object is referenced in
- * recent allocation / deallocation records, protected by tdata->lock.
- */
- uint64_t recent_count;
-
- /* Profiling counters, protected by tdata->lock. */
- prof_cnt_t cnts;
-
- /* Associated global context. */
- prof_gctx_t *gctx;
-
- /*
- * UID that distinguishes multiple tctx's created by the same thread,
- * but coexisting in gctx->tctxs. There are two ways that such
- * coexistence can occur:
- * - A dumper thread can cause a tctx to be retained in the purgatory
- * state.
- * - Although a single "producer" thread must create all tctx's which
- * share the same thr_uid, multiple "consumers" can each concurrently
- * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
- * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
- * threshold can be hit again before the first consumer finishes
- * executing prof_tctx_destroy().
- */
- uint64_t tctx_uid;
-
- /* Linkage into gctx's tctxs. */
- rb_node(prof_tctx_t) tctx_link;
-
- /*
- * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
- * sample vs destroy race.
- */
- bool prepared;
-
- /* Current dump-related state, protected by gctx->lock. */
- prof_tctx_state_t state;
-
- /*
- * Copy of cnts snapshotted during early dump phase, protected by
- * dump_mtx.
- */
- prof_cnt_t dump_cnts;
-};
-typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
-
-struct prof_info_s {
- /* Time when the allocation was made. */
- nstime_t alloc_time;
- /* Points to the prof_tctx_t corresponding to the allocation. */
- prof_tctx_t *alloc_tctx;
- /* Allocation request size. */
- size_t alloc_size;
-};
-
-struct prof_gctx_s {
- /* Protects nlimbo, cnt_summed, and tctxs. */
- malloc_mutex_t *lock;
-
- /*
- * Number of threads that currently cause this gctx to be in a state of
- * limbo due to one of:
- * - Initializing this gctx.
- * - Initializing per thread counters associated with this gctx.
- * - Preparing to destroy this gctx.
- * - Dumping a heap profile that includes this gctx.
- * nlimbo must be 1 (single destroyer) in order to safely destroy the
- * gctx.
- */
- unsigned nlimbo;
-
- /*
- * Tree of profile counters, one for each thread that has allocated in
- * this context.
- */
- prof_tctx_tree_t tctxs;
-
- /* Linkage for tree of contexts to be dumped. */
- rb_node(prof_gctx_t) dump_link;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Associated backtrace. */
- prof_bt_t bt;
-
- /* Backtrace vector, variable size, referred to by bt. */
- void *vec[1];
-};
-typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
-
-struct prof_tdata_s {
- malloc_mutex_t *lock;
-
- /* Monotonically increasing unique thread identifier. */
- uint64_t thr_uid;
-
- /*
- * Monotonically increasing discriminator among tdata structures
- * associated with the same thr_uid.
- */
- uint64_t thr_discrim;
-
- /* Included in heap profile dumps if non-NULL. */
- char *thread_name;
-
- bool attached;
- bool expired;
-
- rb_node(prof_tdata_t) tdata_link;
-
- /*
- * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
- * necessary when incrementing this field, because only one thread ever
- * does so.
- */
- uint64_t tctx_uid_next;
-
- /*
- * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
- * backtraces for which it has non-zero allocation/deallocation counters
- * associated with thread-specific prof_tctx_t objects. Other threads
- * may write to prof_tctx_t contents when freeing associated objects.
- */
- ckh_t bt2tctx;
-
- /* State used to avoid dumping while operating on prof internals. */
- bool enq;
- bool enq_idump;
- bool enq_gdump;
-
- /*
- * Set to true during an early dump phase for tdata's which are
- * currently being dumped. New threads' tdata's have this initialized
- * to false so that they aren't accidentally included in later dump
- * phases.
- */
- bool dumping;
-
- /*
- * True if profiling is active for this tdata's thread
- * (thread.prof.active mallctl).
- */
- bool active;
-
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* Backtrace vector, used for calls to prof_backtrace(). */
- void *vec[PROF_BT_MAX];
-};
-typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
-
-struct prof_recent_s {
- nstime_t alloc_time;
- nstime_t dalloc_time;
-
- ql_elm(prof_recent_t) link;
- size_t size;
- size_t usize;
- atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
- prof_tctx_t *alloc_tctx;
- prof_tctx_t *dalloc_tctx;
-};
-
-#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_sys.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_sys.h
deleted file mode 100644
index 3d25a429..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_sys.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
-#define JEMALLOC_INTERNAL_PROF_SYS_H
-
-extern malloc_mutex_t prof_dump_filename_mtx;
-extern base_t *prof_base;
-
-void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
-void prof_hooks_init();
-void prof_unwind_init();
-void prof_sys_thread_name_fetch(tsd_t *tsd);
-int prof_getpid(void);
-void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
-void prof_fdump_impl(tsd_t *tsd);
-void prof_idump_impl(tsd_t *tsd);
-bool prof_mdump_impl(tsd_t *tsd, const char *filename);
-void prof_gdump_impl(tsd_t *tsd);
-
-/* Used in unit tests. */
-typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
-extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
-typedef int (prof_dump_open_file_t)(const char *, int);
-extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
-typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
-extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
-typedef int (prof_dump_open_maps_t)();
-extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
-
-#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
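The JET_MUTABLE pointers exist so tests can swap out the low-level I/O. A hedged sketch of a replacement writer matching prof_dump_write_file_t follows; the buffer and names are hypothetical, and installing it by plain assignment (prof_dump_write_file = capture_dump_write;) assumes a build in which JET_MUTABLE leaves the pointer writable.

/*
 * Hypothetical test writer matching prof_dump_write_file_t: capture dump
 * bytes into an in-memory buffer instead of writing to a file descriptor.
 */
#include <stddef.h>
#include <string.h>
#include <sys/types.h>

static char   capture_buf[65536];
static size_t capture_len = 0;

static ssize_t
capture_dump_write(int fd, const void *buf, size_t count) {
	(void)fd; /* ignored; everything goes to the in-memory buffer */
	size_t room = sizeof(capture_buf) - capture_len;
	size_t n = count < room ? count : room;
	memcpy(capture_buf + capture_len, buf, n);
	capture_len += n;
	return (ssize_t)n;
}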
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_types.h
deleted file mode 100644
index ba628654..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/prof_types.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
-#define JEMALLOC_INTERNAL_PROF_TYPES_H
-
-typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_tctx_s prof_tctx_t;
-typedef struct prof_info_s prof_info_t;
-typedef struct prof_gctx_s prof_gctx_t;
-typedef struct prof_tdata_s prof_tdata_t;
-typedef struct prof_recent_s prof_recent_t;
-
-/* Option defaults. */
-#ifdef JEMALLOC_PROF
-# define PROF_PREFIX_DEFAULT "jeprof"
-#else
-# define PROF_PREFIX_DEFAULT ""
-#endif
-#define LG_PROF_SAMPLE_DEFAULT 19
-#define LG_PROF_INTERVAL_DEFAULT -1
-
-/*
- * Hard limit on stack backtrace depth. The version of prof_backtrace() that
- * is based on __builtin_return_address() necessarily has a hard-coded number
- * of backtrace frame handlers, and should be kept in sync with this setting.
- */
-#define PROF_BT_MAX 128
-
-/* Initial hash table size. */
-#define PROF_CKH_MINITEMS 64
-
-/* Size of memory buffer to use when writing dump files. */
-#ifndef JEMALLOC_PROF
-/* Minimize memory bloat for non-prof builds. */
-# define PROF_DUMP_BUFSIZE 1
-#elif defined(JEMALLOC_DEBUG)
-/* Use a small buffer size in debug build, mainly to facilitate testing. */
-# define PROF_DUMP_BUFSIZE 16
-#else
-# define PROF_DUMP_BUFSIZE 65536
-#endif
-
-/* Size of size-class-related tables. */
-#ifdef JEMALLOC_PROF
-# define PROF_SC_NSIZES SC_NSIZES
-#else
-/* Minimize memory bloat for non-prof builds. */
-# define PROF_SC_NSIZES 1
-#endif
-
-/* Size of stack-allocated buffer used by prof_printf(). */
-#define PROF_PRINTF_BUFSIZE 128
-
-/*
- * Number of mutexes shared among all gctx's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NCTX_LOCKS 1024
-
-/*
- * Number of mutexes shared among all tdata's. No space is allocated for these
- * unless profiling is enabled, so it's okay to over-provision.
- */
-#define PROF_NTDATA_LOCKS 256
-
-/* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
-#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
-#else
-#define PROF_DUMP_FILENAME_LEN 1
-#endif
-
-/* Default number of recent allocations to record. */
-#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
-
-#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/psset.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/psset.h
deleted file mode 100644
index e1d64970..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/psset.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PSSET_H
-#define JEMALLOC_INTERNAL_PSSET_H
-
-#include "jemalloc/internal/hpdata.h"
-
-/*
- * A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
- * a collection of page-slabs (the intent being that they are backed by
- * hugepages, or at least could be), and handles allocation and deallocation
- * requests.
- */
-
-/*
- * One more than the maximum pszind_t we will serve out of the HPA.
- * Practically, we expect only the first few to be actually used. This
- * corresponds to a maximum size of 512MB on systems with 4k pages and
- * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
- * can think of this as being SC_NPSIZES, but there's no sense in wasting that
- * much space in the arena, making bitmaps that much larger, etc.
- */
-#define PSSET_NPSIZES 64
-
-/*
- * We keep two purge lists per page size class; one for hugified hpdatas (at
- * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
- * 1). This lets us implement a preference for purging non-hugified hpdatas
- * among similarly-dirty ones.
- * We reserve the last two indices for empty slabs; in that case we purge
- * hugified ones (which are definitionally all waste) before non-hugified ones
- * (i.e. the order is reversed).
- */
-#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)
-
-typedef struct psset_bin_stats_s psset_bin_stats_t;
-struct psset_bin_stats_s {
- /* How many pageslabs are in this bin? */
- size_t npageslabs;
- /* Of them, how many pages are active? */
- size_t nactive;
- /* And how many are dirty? */
- size_t ndirty;
-};
-
-typedef struct psset_stats_s psset_stats_t;
-struct psset_stats_s {
- /*
- * The second index is huge stats; nonfull_slabs[pszind][0] contains
- * stats for the non-huge slabs in bucket pszind, while
- * nonfull_slabs[pszind][1] contains stats for the huge slabs.
- */
- psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];
-
- /*
- * Full slabs don't live in any edata heap, but we still track their
- * stats.
- */
- psset_bin_stats_t full_slabs[2];
-
- /* Empty slabs are similar. */
- psset_bin_stats_t empty_slabs[2];
-};
-
-typedef struct psset_s psset_t;
-struct psset_s {
- /*
- * The pageslabs, quantized by the size class of the largest contiguous
- * free run of pages in a pageslab.
- */
- hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
- /* Bitmap for which set bits correspond to non-empty heaps. */
- fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
- /*
- * The sum of all bin stats in stats. This lets us quickly answer
- * queries for the number of dirty, active, and retained pages in the
- * entire set.
- */
- psset_bin_stats_t merged_stats;
- psset_stats_t stats;
- /*
- * Slabs with no active allocations, but which are allowed to serve new
- * allocations.
- */
- hpdata_empty_list_t empty;
- /*
- * Slabs which are available to be purged, ordered by how much we want
- * to purge them (with later indices indicating slabs we want to purge
- * more).
- */
- hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
- /* Bitmap for which set bits correspond to non-empty purge lists. */
- fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
- /* Slabs which are available to be hugified. */
- hpdata_hugify_list_t to_hugify;
-};
-
-void psset_init(psset_t *psset);
-void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);
-
-/*
- * Begin or end updating the given pageslab's metadata. While the pageslab is
- * being updated, it won't be returned from psset_fit calls.
- */
-void psset_update_begin(psset_t *psset, hpdata_t *ps);
-void psset_update_end(psset_t *psset, hpdata_t *ps);
-
-/* Analogous to the eset_fit; pick a hpdata to serve the request. */
-hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
-/* Pick one to purge. */
-hpdata_t *psset_pick_purge(psset_t *psset);
-/* Pick one to hugify. */
-hpdata_t *psset_pick_hugify(psset_t *psset);
-
-void psset_insert(psset_t *psset, hpdata_t *ps);
-void psset_remove(psset_t *psset, hpdata_t *ps);
-
-static inline size_t
-psset_npageslabs(psset_t *psset) {
- return psset->merged_stats.npageslabs;
-}
-
-static inline size_t
-psset_nactive(psset_t *psset) {
- return psset->merged_stats.nactive;
-}
-
-static inline size_t
-psset_ndirty(psset_t *psset) {
- return psset->merged_stats.ndirty;
-}
-
-#endif /* JEMALLOC_INTERNAL_PSSET_H */
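
The purge-list indexing described above (hugified hpdatas at index 2*pszind, non-hugified at 2*pszind + 1, and the last two indices reserved for empty slabs with the order reversed) can be restated as a short, self-contained C sketch. The helper name purge_list_ind and the demo are illustrative only; they are not part of the removed header, which only declares the psset API.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Constants copied from the removed psset.h. */
#define PSSET_NPSIZES 64
#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

/* Hypothetical helper restating the documented indexing scheme. */
static size_t
purge_list_ind(size_t pszind, bool hugified, bool empty) {
    if (empty) {
        /* Empty slabs take the last two lists; hugified ones are purged first. */
        return hugified ? PSSET_NPURGE_LISTS - 1 : PSSET_NPURGE_LISTS - 2;
    }
    /* Later indices are purged more eagerly, so non-hugified slabs win ties. */
    return 2 * pszind + (hugified ? 0 : 1);
}

int
main(void) {
    assert(purge_list_ind(3, true, false) == 6);
    assert(purge_list_ind(3, false, false) == 7);
    assert(purge_list_ind(0, true, true) == PSSET_NPURGE_LISTS - 1);
    return 0;
}
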
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_namespace.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_namespace.sh
deleted file mode 100755
index 4d415ba0..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_namespace.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-for nm in `cat $1` ; do
- n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
- echo "#define je_${n} JEMALLOC_N(${n})"
-done
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_unnamespace.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_unnamespace.sh
deleted file mode 100755
index 4239d177..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/public_unnamespace.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-for nm in `cat $1` ; do
- n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
- echo "#undef je_${n}"
-done
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ql.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ql.h
deleted file mode 100644
index c7f52f86..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ql.h
+++ /dev/null
@@ -1,197 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_QL_H
-#define JEMALLOC_INTERNAL_QL_H
-
-#include "jemalloc/internal/qr.h"
-
-/*
- * A linked-list implementation.
- *
- * This is built on top of the ring implementation, but that can be viewed as an
- * implementation detail (i.e. trying to advance past the tail of the list
- * doesn't wrap around).
- *
- * You define a struct like so:
- * typedef struct my_s my_t;
- * struct my_s {
- * int data;
- * ql_elm(my_t) my_link;
- * };
- *
- * // We wobble between "list" and "head" for this type; we're now mostly
- * // heading towards "list".
- * typedef ql_head(my_t) my_list_t;
- *
- * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
- * arguments, the token "my_link" for a_field arguments, and the token "my_t"
- * for a_type arguments.
- */
-
-/* List definitions. */
-#define ql_head(a_type) \
-struct { \
- a_type *qlh_first; \
-}
-
-/* Static initializer for an empty list. */
-#define ql_head_initializer(a_head) {NULL}
-
-/* The field definition. */
-#define ql_elm(a_type) qr(a_type)
-
-/* A pointer to the first element in the list, or NULL if the list is empty. */
-#define ql_first(a_head) ((a_head)->qlh_first)
-
-/* Dynamically initializes a list. */
-#define ql_new(a_head) do { \
- ql_first(a_head) = NULL; \
-} while (0)
-
-/*
- * Sets dest to be the contents of src (overwriting any elements there), leaving
- * src empty.
- */
-#define ql_move(a_head_dest, a_head_src) do { \
- ql_first(a_head_dest) = ql_first(a_head_src); \
- ql_new(a_head_src); \
-} while (0)
-
-/* True if the list is empty, otherwise false. */
-#define ql_empty(a_head) (ql_first(a_head) == NULL)
-
-/*
- * Initializes a ql_elm. Must be called even if the field is about to be
- * overwritten.
- */
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-
-/*
- * Obtains the last item in the list.
- */
-#define ql_last(a_head, a_field) \
- (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
-
-/*
- * Gets a pointer to the next/prev element in the list. Trying to advance past
- * the end or retreat before the beginning of the list returns NULL.
- */
-#define ql_next(a_head, a_elm, a_field) \
- ((ql_last(a_head, a_field) != (a_elm)) \
- ? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
- ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
- : NULL)
-
-/* Inserts a_elm before a_qlelm in the list. */
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
- qr_before_insert((a_qlelm), (a_elm), a_field); \
- if (ql_first(a_head) == (a_qlelm)) { \
- ql_first(a_head) = (a_elm); \
- } \
-} while (0)
-
-/* Inserts a_elm after a_qlelm in the list. */
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
- qr_after_insert((a_qlelm), (a_elm), a_field)
-
-/* Inserts a_elm as the first item in the list. */
-#define ql_head_insert(a_head, a_elm, a_field) do { \
- if (!ql_empty(a_head)) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = (a_elm); \
-} while (0)
-
-/* Inserts a_elm as the last item in the list. */
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
- if (!ql_empty(a_head)) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = qr_next((a_elm), a_field); \
-} while (0)
-
-/*
- * Given lists a = [a_1, ..., a_n] and b = [b_1, ..., b_n], this results in:
- * a = [a_1, ..., a_n, b_1, ..., b_n] and b = [].
- */
-#define ql_concat(a_head_a, a_head_b, a_field) do { \
- if (ql_empty(a_head_a)) { \
- ql_move(a_head_a, a_head_b); \
- } else if (!ql_empty(a_head_b)) { \
- qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
- a_field); \
- ql_new(a_head_b); \
- } \
-} while (0)
-
-/* Removes a_elm from the list. */
-#define ql_remove(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) == (a_elm)) { \
- ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
- } \
- if (ql_first(a_head) != (a_elm)) { \
- qr_remove((a_elm), a_field); \
- } else { \
- ql_new(a_head); \
- } \
-} while (0)
-
-/* Removes the first item in the list. */
-#define ql_head_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_first(a_head); \
- ql_remove((a_head), t, a_field); \
-} while (0)
-
-/* Removes the last item in the list. */
-#define ql_tail_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_last(a_head, a_field); \
- ql_remove((a_head), t, a_field); \
-} while (0)
-
-/*
- * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
- * ql_split(a, a_n, b, some_field) results in
- * a = [a_1, a_2, ..., a_n-1]
- * and replaces b's contents with:
- * b = [a_n, a_n+1, ...]
- */
-#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
- if (ql_first(a_head_a) == (a_elm)) { \
- ql_move(a_head_b, a_head_a); \
- } else { \
- qr_split(ql_first(a_head_a), (a_elm), a_field); \
- ql_first(a_head_b) = (a_elm); \
- } \
-} while (0)
-
-/*
- * An optimized version of:
- * a_type *t = ql_first(a_head);
- * ql_remove((a_head), t, a_field);
- * ql_tail_insert((a_head), t, a_field);
- */
-#define ql_rotate(a_head, a_field) do { \
- ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
-} while (0)
-
-/*
- * Helper macro to iterate over each element in a list in order, starting from
- * the head (or in reverse order, starting from the tail). The usage is
- * (assuming my_t and my_list_t defined as above).
- *
- * int sum(my_list_t *list) {
- * int sum = 0;
- * my_t *iter;
- *	ql_foreach(iter, list, my_link) {
- * sum += iter->data;
- * }
- * return sum;
- * }
- */
-
-#define ql_foreach(a_var, a_head, a_field) \
- qr_foreach((a_var), ql_first(a_head), a_field)
-
-#define ql_reverse_foreach(a_var, a_head, a_field) \
- qr_reverse_foreach((a_var), ql_first(a_head), a_field)
-
-#endif /* JEMALLOC_INTERNAL_QL_H */
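
Since the header above only documents the ql_* macros in comments, a complete usage sketch may help; the type names, field name, and values below are illustrative, and the include path assumes jemalloc's include/ directory is on the compiler's search path.

#include <assert.h>
#include "jemalloc/internal/ql.h"

typedef struct my_s my_t;
struct my_s {
    int data;
    ql_elm(my_t) my_link;
};
typedef ql_head(my_t) my_list_t;

int
main(void) {
    my_list_t list = ql_head_initializer(list);
    my_t a, b;
    a.data = 1;
    b.data = 2;

    /* Links must be initialized before any insertion. */
    ql_elm_new(&a, my_link);
    ql_elm_new(&b, my_link);
    ql_tail_insert(&list, &a, my_link);
    ql_tail_insert(&list, &b, my_link);

    int sum = 0;
    my_t *iter;
    ql_foreach(iter, &list, my_link) {
        sum += iter->data;
    }
    assert(sum == 3);
    assert(ql_first(&list) == &a && ql_last(&list, my_link) == &b);
    return 0;
}
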
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/qr.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/qr.h
deleted file mode 100644
index ece4f556..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/qr.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_QR_H
-#define JEMALLOC_INTERNAL_QR_H
-
-/*
- * A ring implementation based on an embedded circular doubly-linked list.
- *
- * You define your struct like so:
- *
- * typedef struct my_s my_t;
- * struct my_s {
- * int data;
- * qr(my_t) my_link;
- * };
- *
- * And then pass a my_t * into macros for a_qr arguments, and the token
- * "my_link" into a_field fields.
- */
-
-/* Ring definitions. */
-#define qr(a_type) \
-struct { \
- a_type *qre_next; \
- a_type *qre_prev; \
-}
-
-/*
- * Initialize a qr link. Every link must be initialized before being used, even
- * if that initialization is going to be immediately overwritten (say, by being
- * passed into an insertion macro).
- */
-#define qr_new(a_qr, a_field) do { \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
-} while (0)
-
-/*
- * Go forwards or backwards in the ring. Note that (the ring being circular), this
- * always succeeds -- you just keep looping around and around the ring if you
- * chase pointers without end.
- */
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-
-/*
- * Given two rings:
- * a -> a_1 -> ... -> a_n --
- * ^ |
- * |------------------------
- *
- * b -> b_1 -> ... -> b_n --
- * ^ |
- * |------------------------
- *
- * Results in the ring:
- * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
- * ^ |
- * |-------------------------------------------------|
- *
- * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
- */
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next = \
- (a_qr_a)->a_field.qre_prev; \
- (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
- (a_qr_b)->a_field.qre_prev = \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next; \
- (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
-} while (0)
-
-/*
- * Logically, this is just a meld. The intent, though, is that a_qrelm is a
- * single-element ring, so that "before" has a more obvious interpretation than
- * meld.
- */
-#define qr_before_insert(a_qrelm, a_qr, a_field) \
- qr_meld((a_qrelm), (a_qr), a_field)
-
-/* Ditto, but inserting after rather than before. */
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
- qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
-
-/*
- * Inverts meld; given the ring:
- * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
- * ^ |
- * |-------------------------------------------------|
- *
- * Results in two rings:
- * a -> a_1 -> ... -> a_n --
- * ^ |
- * |------------------------
- *
- * b -> b_1 -> ... -> b_n --
- * ^ |
- * |------------------------
- *
- * qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code.
- */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_field)
-
-/*
- * Splits off a_qr from the rest of its ring, so that it becomes a
- * single-element ring.
- */
-#define qr_remove(a_qr, a_field) \
- qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
-
-/*
- * Helper macro to iterate over each element in a ring exactly once, starting
- * with a_qr. The usage is (assuming my_t defined as above):
- *
- * int sum(my_t *item) {
- * int sum = 0;
- * my_t *iter;
- *	qr_foreach(iter, item, my_link) {
- * sum += iter->data;
- * }
- * return sum;
- * }
- */
-#define qr_foreach(var, a_qr, a_field) \
- for ((var) = (a_qr); \
- (var) != NULL; \
- (var) = (((var)->a_field.qre_next != (a_qr)) \
- ? (var)->a_field.qre_next : NULL))
-
-/*
- * The same (and with the same usage) as qr_foreach, but in the opposite order,
- * ending with a_qr.
- */
-#define qr_reverse_foreach(var, a_qr, a_field) \
- for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
- (var) != NULL; \
- (var) = (((var) != (a_qr)) \
- ? (var)->a_field.qre_prev : NULL))
-
-#endif /* JEMALLOC_INTERNAL_QR_H */
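
The ring macros above are easiest to see on a two-element ring; the following sketch is illustrative only (the type and field names are made up) and, like the ql example earlier, assumes jemalloc's include/ directory is on the include path.

#include <assert.h>
#include "jemalloc/internal/qr.h"

typedef struct node_s node_t;
struct node_s {
    int data;
    qr(node_t) link;
};

int
main(void) {
    node_t a, b;
    a.data = 1;
    b.data = 2;
    qr_new(&a, link);
    qr_new(&b, link);

    /* Meld the two one-element rings: a -> b -> a -> ... */
    qr_after_insert(&a, &b, link);
    assert(qr_next(&a, link) == &b && qr_prev(&a, link) == &b);

    /* Split b back out into a single-element ring. */
    qr_remove(&b, link);
    assert(qr_next(&a, link) == &a && qr_next(&b, link) == &b);
    return 0;
}
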
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/quantum.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/quantum.h
deleted file mode 100644
index c22d753a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/quantum.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_QUANTUM_H
-#define JEMALLOC_INTERNAL_QUANTUM_H
-
-/*
- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
- * classes).
- */
-#ifndef LG_QUANTUM
-# if (defined(__i386__) || defined(_M_IX86))
-# define LG_QUANTUM 4
-# endif
-# ifdef __ia64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __alpha__
-# define LG_QUANTUM 4
-# endif
-# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
-# define LG_QUANTUM 4
-# endif
-# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-# define LG_QUANTUM 4
-# endif
-# ifdef __arm__
-# define LG_QUANTUM 3
-# endif
-# ifdef __aarch64__
-# define LG_QUANTUM 4
-# endif
-# ifdef __hppa__
-# define LG_QUANTUM 4
-# endif
-# ifdef __loongarch__
-# define LG_QUANTUM 4
-# endif
-# ifdef __m68k__
-# define LG_QUANTUM 3
-# endif
-# ifdef __mips__
-# if defined(__mips_n32) || defined(__mips_n64)
-# define LG_QUANTUM 4
-# else
-# define LG_QUANTUM 3
-# endif
-# endif
-# ifdef __nios2__
-# define LG_QUANTUM 3
-# endif
-# ifdef __or1k__
-# define LG_QUANTUM 3
-# endif
-# ifdef __powerpc__
-# define LG_QUANTUM 4
-# endif
-# if defined(__riscv) || defined(__riscv__)
-# define LG_QUANTUM 4
-# endif
-# ifdef __s390__
-# define LG_QUANTUM 4
-# endif
-# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
- defined(__SH4_SINGLE_ONLY__))
-# define LG_QUANTUM 4
-# endif
-# ifdef __tile__
-# define LG_QUANTUM 4
-# endif
-# ifdef __le32__
-# define LG_QUANTUM 4
-# endif
-# ifdef __arc__
-# define LG_QUANTUM 3
-# endif
-# ifndef LG_QUANTUM
-#    error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
-# endif
-#endif
-
-#define QUANTUM ((size_t)(1U << LG_QUANTUM))
-#define QUANTUM_MASK (QUANTUM - 1)
-
-/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
- (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
-
-#endif /* JEMALLOC_INTERNAL_QUANTUM_H */
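
A quick worked example of the rounding macro: assuming LG_QUANTUM ends up as 4 (as it does on x86_64 per the table above), QUANTUM is 16 bytes and QUANTUM_CEILING rounds a size up to the next multiple of 16. The demo below includes the removed header directly and is illustrative only.

#include <assert.h>
#include "jemalloc/internal/quantum.h"

int
main(void) {
    assert(QUANTUM == 16);              /* holds only when LG_QUANTUM == 4 */
    assert(QUANTUM_CEILING(1) == 16);   /* rounds up to one quantum */
    assert(QUANTUM_CEILING(16) == 16);  /* already a multiple */
    assert(QUANTUM_CEILING(17) == 32);  /* next multiple of 16 */
    return 0;
}
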
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rb.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rb.h
deleted file mode 100644
index a9a51cb6..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rb.h
+++ /dev/null
@@ -1,1856 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RB_H
-#define JEMALLOC_INTERNAL_RB_H
-
-/*-
- *******************************************************************************
- *
- * cpp macro implementation of left-leaning 2-3 red-black trees. Parent
- * pointers are not used, and color bits are stored in the least significant
- * bit of right-child pointers (if RB_COMPACT is defined), thus making node
- * linkage as compact as is possible for red-black trees.
- *
- * Usage:
- *
- * #include <stdint.h>
- * #include <stdbool.h>
- * #define NDEBUG // (Optional, see assert(3).)
- * #include <assert.h>
- * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
- * #include <rb.h>
- * ...
- *
- *******************************************************************************
- */
-
-#ifndef __PGI
-#define RB_COMPACT
-#endif
-
-/*
- * Each node in the RB tree consumes at least 1 byte of space (for the linkage
- * if nothing else), so there are at most 2^(sizeof(void *) << 3) rb tree nodes
- * in any process (and thus, at most that many nodes in any rb tree).
- * The choice of algorithm bounds the depth of a tree to twice the binary log of
- * the number of elements in the tree; the following bound follows.
- */
-#define RB_MAX_DEPTH (sizeof(void *) << 4)
-
-#ifdef RB_COMPACT
-/* Node structure. */
-#define rb_node(a_type) \
-struct { \
- a_type *rbn_left; \
- a_type *rbn_right_red; \
-}
-#else
-#define rb_node(a_type) \
-struct { \
- a_type *rbn_left; \
- a_type *rbn_right; \
- bool rbn_red; \
-}
-#endif
-
-/* Root structure. */
-#define rb_tree(a_type) \
-struct { \
- a_type *rbt_root; \
-}
-
-/* Left accessors. */
-#define rbtn_left_get(a_type, a_field, a_node) \
- ((a_node)->a_field.rbn_left)
-#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
- (a_node)->a_field.rbn_left = a_left; \
-} while (0)
-
-#ifdef RB_COMPACT
-/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
- ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
- & ((ssize_t)-2)))
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
- | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
-} while (0)
-
-/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
- ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
- & ((size_t)1)))
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
- (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
- | ((ssize_t)a_red)); \
-} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
- (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
-} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
- (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
-} while (0)
-
-/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- /* Bookkeeping bit cannot be used by node pointer. */ \
- assert(((uintptr_t)(a_node) & 0x1) == 0); \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
- rbtn_red_set(a_type, a_field, (a_node)); \
-} while (0)
-#else
-/* Right accessors. */
-#define rbtn_right_get(a_type, a_field, a_node) \
- ((a_node)->a_field.rbn_right)
-#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
- (a_node)->a_field.rbn_right = a_right; \
-} while (0)
-
-/* Color accessors. */
-#define rbtn_red_get(a_type, a_field, a_node) \
- ((a_node)->a_field.rbn_red)
-#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
- (a_node)->a_field.rbn_red = (a_red); \
-} while (0)
-#define rbtn_red_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_red = true; \
-} while (0)
-#define rbtn_black_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_red = false; \
-} while (0)
-
-/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
- rbtn_red_set(a_type, a_field, (a_node)); \
-} while (0)
-#endif
-
-/* Tree initializer. */
-#define rb_new(a_type, a_field, a_rbt) do { \
- (a_rbt)->rbt_root = NULL; \
-} while (0)
-
-/* Internal utility macros. */
-#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
- (r_node) = (a_root); \
- if ((r_node) != NULL) { \
- for (; \
- rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
- (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
- } \
- } \
-} while (0)
-
-#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
- (r_node) = (a_root); \
- if ((r_node) != NULL) { \
- for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
- (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
- } \
- } \
-} while (0)
-
-#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
- (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
- rbtn_right_set(a_type, a_field, (a_node), \
- rbtn_left_get(a_type, a_field, (r_node))); \
- rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
-} while (0)
-
-#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
- (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
- rbtn_left_set(a_type, a_field, (a_node), \
- rbtn_right_get(a_type, a_field, (r_node))); \
- rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
-} while (0)
-
-#define rb_summarized_only_false(...)
-#define rb_summarized_only_true(...) __VA_ARGS__
-#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
-
-/*
- * The rb_proto() and rb_summarized_proto() macros generate function prototypes
- * that correspond to the functions generated by an equivalently parameterized
- * call to rb_gen() or rb_summarized_gen(), respectively.
- */
-
-#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
- rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
-#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \
- rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
-#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \
- a_is_summarized) \
-a_attr void \
-a_prefix##new(a_rbt_type *rbtree); \
-a_attr bool \
-a_prefix##empty(a_rbt_type *rbtree); \
-a_attr a_type * \
-a_prefix##first(a_rbt_type *rbtree); \
-a_attr a_type * \
-a_prefix##last(a_rbt_type *rbtree); \
-a_attr a_type * \
-a_prefix##next(a_rbt_type *rbtree, a_type *node); \
-a_attr a_type * \
-a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
-a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
-a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
-a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
-a_attr void \
-a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
-a_attr void \
-a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
-a_attr a_type * \
-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
- a_rbt_type *, a_type *, void *), void *arg); \
-a_attr a_type * \
-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg); \
-/* Extended API */ \
-rb_summarized_only_##a_is_summarized( \
-a_attr void \
-a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \
-a_attr bool \
-a_prefix##empty_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##first_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##last_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-a_attr a_type * \
-a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx); \
-)
-
-/*
- * The rb_gen() macro generates a type-specific red-black tree implementation,
- * based on the above cpp macros.
- * Arguments:
- *
- * a_attr:
- * Function attribute for generated functions (ex: static).
- * a_prefix:
- * Prefix for generated functions (ex: ex_).
- *   a_rbt_type:
- * Type for red-black tree data structure (ex: ex_t).
- * a_type:
- * Type for red-black tree node data structure (ex: ex_node_t).
- * a_field:
- * Name of red-black tree node linkage (ex: ex_link).
- * a_cmp:
- * Node comparison function name, with the following prototype:
- *
- * int a_cmp(a_type *a_node, a_type *a_other);
- * ^^^^^^
- * or a_key
- * Interpretation of comparison function return values:
- * -1 : a_node < a_other
- * 0 : a_node == a_other
- * 1 : a_node > a_other
- * In all cases, the a_node or a_key macro argument is the first argument to
- * the comparison function, which makes it possible to write comparison
- * functions that treat the first argument specially. a_cmp must be a total
- * order on values inserted into the tree -- duplicates are not allowed.
- *
- * Assuming the following setup:
- *
- * typedef struct ex_node_s ex_node_t;
- * struct ex_node_s {
- * rb_node(ex_node_t) ex_link;
- * };
- * typedef rb_tree(ex_node_t) ex_t;
- * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
- *
- * The following API is generated:
- *
- * static void
- * ex_new(ex_t *tree);
- * Description: Initialize a red-black tree structure.
- * Args:
- * tree: Pointer to an uninitialized red-black tree object.
- *
- * static bool
- * ex_empty(ex_t *tree);
- * Description: Determine whether tree is empty.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * Ret: True if tree is empty, false otherwise.
- *
- * static ex_node_t *
- * ex_first(ex_t *tree);
- * static ex_node_t *
- * ex_last(ex_t *tree);
- * Description: Get the first/last node in tree.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * Ret: First/last node in tree, or NULL if tree is empty.
- *
- * static ex_node_t *
- * ex_next(ex_t *tree, ex_node_t *node);
- * static ex_node_t *
- * ex_prev(ex_t *tree, ex_node_t *node);
- * Description: Get node's successor/predecessor.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * node: A node in tree.
- * Ret: node's successor/predecessor in tree, or NULL if node is
- * last/first.
- *
- * static ex_node_t *
- * ex_search(ex_t *tree, const ex_node_t *key);
- * Description: Search for node that matches key.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * key : Search key.
- * Ret: Node in tree that matches key, or NULL if no match.
- *
- * static ex_node_t *
- * ex_nsearch(ex_t *tree, const ex_node_t *key);
- * static ex_node_t *
- * ex_psearch(ex_t *tree, const ex_node_t *key);
- * Description: Search for node that matches key. If no match is found,
- * return what would be key's successor/predecessor, were
- * key in tree.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * key : Search key.
- * Ret: Node in tree that matches key, or if no match, hypothetical node's
- * successor/predecessor (NULL if no successor/predecessor).
- *
- * static void
- * ex_insert(ex_t *tree, ex_node_t *node);
- * Description: Insert node into tree.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * node: Node to be inserted into tree.
- *
- * static void
- * ex_remove(ex_t *tree, ex_node_t *node);
- * Description: Remove node from tree.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * node: Node in tree to be removed.
- *
- * static ex_node_t *
- * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
- * ex_node_t *, void *), void *arg);
- * static ex_node_t *
- * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
- * ex_node_t *, void *), void *arg);
- * Description: Iterate forward/backward over tree, starting at node. If
- * tree is modified, iteration must be immediately
- * terminated by the callback function that causes the
- * modification.
- * Args:
- * tree : Pointer to an initialized red-black tree object.
- * start: Node at which to start iteration, or NULL to start at
- * first/last node.
- * cb : Callback function, which is called for each node during
- * iteration. Under normal circumstances the callback function
- * should return NULL, which causes iteration to continue. If a
- * callback function returns non-NULL, iteration is immediately
- * terminated and the non-NULL return value is returned by the
- * iterator. This is useful for re-starting iteration after
- * modifying tree.
- * arg : Opaque pointer passed to cb().
- * Ret: NULL if iteration completed, or the non-NULL callback return value
- * that caused termination of the iteration.
- *
- * static void
- * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
- * Description: Iterate over the tree with post-order traversal, remove
- * each node, and run the callback if non-null. This is
- * used for destroying a tree without paying the cost to
- * rebalance it. The tree must not be otherwise altered
- * during traversal.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * cb : Callback function, which, if non-null, is called for each node
- * during iteration. There is no way to stop iteration once it
- * has begun.
- * arg : Opaque pointer passed to cb().
- *
- * The rb_summarized_gen() macro generates all the functions above, but has an
- * expanded interface. It introduces the notion of summarizing subtrees, and of
- * filtering searches in the tree according to the information contained in
- * those summaries.
- * The extra macro argument is:
- * a_summarize:
- * Tree summarization function name, with the following prototype:
- *
- * bool a_summarize(a_type *a_node, const a_type *a_left_child,
- * const a_type *a_right_child);
- *
- * This function should update a_node with the summary of the subtree rooted
- * there, using the data contained in it and the summaries in a_left_child
- * and a_right_child. One or both of them may be NULL. When the tree
- * changes due to an insertion or removal, it updates the summaries of all
- * nodes whose subtrees have changed (always updating the summaries of
- * children before their parents). If the user alters a node in the tree in
- * a way that may change its summary, they can call the generated
- * update_summaries function to bubble up the summary changes to the root.
- * It should return true if the summary changed (or may have changed), and
- * false if it didn't (which will allow the implementation to terminate
- * "bubbling up" the summaries early).
- * As the parameter names indicate, the children are ordered as they are in
- *     the tree: a_left_child, if it is not NULL, compares less than a_node,
- * which in turn compares less than a_right_child (if a_right_child is not
- * NULL).
- *
- * Using the same setup as above but replacing the macro with
- * rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
- * ex_summarize)
- *
- * Generates all the previous functions, but adds some more:
- *
- * static void
- * ex_update_summaries(ex_t *tree, ex_node_t *node);
- * Description: Recompute all summaries of ancestors of node.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * node: The element of the tree whose summary may have changed.
- *
- * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
- * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
- * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
- * ex_first_filtered, etc.). These use the concept of a "filter"; a binary
- * property some node either satisfies or does not satisfy. Clever use of the
- * a_summarize argument to rb_summarized_gen can allow efficient computation of
- * these predicates across whole subtrees of the tree.
- * The extended API functions accept three additional arguments after the
- * arguments to the corresponding non-extended equivalent.
- *
- * ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
- * bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
- * filter_node : Returns true if the node passes the filter.
- * filter_subtree : Returns true if some node in the subtree rooted at
- * node passes the filter.
- * filter_ctx : A context argument passed to the filters.
- *
- * For a more concrete example of summarizing and filtering, suppose we're using
- * the red-black tree to track a set of integers:
- *
- * struct ex_node_s {
- * rb_node(ex_node_t) ex_link;
- * unsigned data;
- * };
- *
- * Suppose, for some application-specific reason, we want to be able to quickly
- * find numbers in the set which are divisible by large powers of 2 (say, for
- * aligned allocation purposes). We augment the node with a summary field:
- *
- * struct ex_node_s {
- * rb_node(ex_node_t) ex_link;
- * unsigned data;
- * unsigned max_subtree_ffs;
- * }
- *
- * and define our summarization function as follows:
- *
- * bool
- * ex_summarize(ex_node_t *node, const ex_node_t *lchild,
- * const ex_node_t *rchild) {
- * unsigned new_max_subtree_ffs = ffs(node->data);
- * if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
- * new_max_subtree_ffs = lchild->max_subtree_ffs;
- * }
- * if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
- * new_max_subtree_ffs = rchild->max_subtree_ffs;
- * }
- *     bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
- * node->max_subtree_ffs = new_max_subtree_ffs;
- * // This could be "return true" without any correctness or big-O
- * // performance changes; but practically, precisely reporting summary
- * // changes reduces the amount of work that has to be done when "bubbling
- * // up" summary changes.
- * return changed;
- * }
- *
- * We can now implement our filter functions as follows:
- * bool
- * ex_filter_node(void *filter_ctx, ex_node_t *node) {
- * unsigned required_ffs = *(unsigned *)filter_ctx;
- * return ffs(node->data) >= required_ffs;
- * }
- * bool
- * ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
- * unsigned required_ffs = *(unsigned *)filter_ctx;
- * return node->max_subtree_ffs >= required_ffs;
- * }
- *
- * We can now easily search for, e.g., the smallest integer in the set that's
- * divisible by 128:
- * ex_node_t *
- * find_div_128(ex_t *tree) {
- *     unsigned min_ffs = 8;
- * return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
- * &min_ffs);
- * }
- *
- * We could with similar ease:
- *  - Find the next multiple of 128 in the set that's larger than 12345 (with
- * ex_nsearch_filtered)
- * - Iterate over just those multiples of 64 that are in the set (with
- * ex_iter_filtered)
- * - Determine if the set contains any multiples of 1024 (with
- * ex_empty_filtered).
- *
- * Some possibly subtle API notes:
- * - The node argument to ex_next_filtered and ex_prev_filtered need not pass
- * the filter; it will find the next/prev node that passes the filter.
- * - ex_search_filtered will fail even for a node in the tree, if that node does
- * not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave
- * similarly; they may return a node larger/smaller than the key, even if a
- * node equivalent to the key is in the tree (but does not pass the filter).
- * - Similarly, if the start argument to a filtered iteration function does not
- * pass the filter, the callback won't be invoked on it.
- *
- * These should make sense after a moment's reflection; each post-condition is
- * the same as with the unfiltered version, with the added constraint that the
- * returned node must pass the filter.
- */
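
To make the generated API concrete, the following is a minimal, self-contained instantiation in the spirit of the ex_ example above; the comparison function, key field, and demo are illustrative and not part of rb.h. Per the usage note at the top of the header, it includes <stdint.h>, <stdbool.h>, and <assert.h> first, and it assumes jemalloc's include/ directory is on the include path.

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "jemalloc/internal/rb.h"

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
    unsigned key;
    rb_node(ex_node_t) ex_link;
};
typedef rb_tree(ex_node_t) ex_t;

/* Total order on keys, returning -1/0/1 as described above. */
static int
ex_cmp(const ex_node_t *a, const ex_node_t *b) {
    return (a->key > b->key) - (a->key < b->key);
}

rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

int
main(void) {
    ex_t tree;
    ex_node_t n1, n2;
    n1.key = 1;
    n2.key = 2;

    ex_new(&tree);
    ex_insert(&tree, &n1);
    ex_insert(&tree, &n2);
    assert(!ex_empty(&tree));
    assert(ex_first(&tree) == &n1 && ex_last(&tree) == &n2);
    assert(ex_next(&tree, &n1) == &n2);

    ex_remove(&tree, &n1);
    assert(ex_search(&tree, &n1) == NULL);
    return 0;
}
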
-#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
- rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
- rb_empty_summarize, false)
-#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \
- a_field, a_cmp, a_summarize) \
- rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
- a_summarize, true)
-
-#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \
- a_field, a_cmp, a_summarize, a_is_summarized) \
-typedef struct { \
- a_type *node; \
- int cmp; \
-} a_prefix##path_entry_t; \
-static inline void \
-a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \
- a_prefix##path_entry_t *rlast) { \
- while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \
- a_type *node = rlast->node; \
- /* Avoid a warning when a_summarize is rb_empty_summarize. */ \
- (void)node; \
- bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \
- node), rbtn_right_get(a_type, a_field, node)); \
- if (!changed) { \
- break; \
- } \
- rlast--; \
- } \
-} \
-/* On the remove pathways, we sometimes swap the node being removed */\
-/* and its first successor; in such cases we need to do two range */\
-/* updates; one from the node to its (former) swapped successor, the */\
-/* next from that successor to the root (with either allowed to */\
-/* bail out early if appropriate). */\
-static inline void \
-a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \
- a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \
- if (swap_loc == NULL || rlast <= swap_loc) { \
- a_prefix##summarize_range(rfirst, rlast); \
- } else { \
- a_prefix##summarize_range(swap_loc + 1, rlast); \
- (void)a_summarize(swap_loc->node, \
- rbtn_left_get(a_type, a_field, swap_loc->node), \
- rbtn_right_get(a_type, a_field, swap_loc->node)); \
- a_prefix##summarize_range(rfirst, swap_loc - 1); \
- } \
-} \
-a_attr void \
-a_prefix##new(a_rbt_type *rbtree) { \
- rb_new(a_type, a_field, rbtree); \
-} \
-a_attr bool \
-a_prefix##empty(a_rbt_type *rbtree) { \
- return (rbtree->rbt_root == NULL); \
-} \
-a_attr a_type * \
-a_prefix##first(a_rbt_type *rbtree) { \
- a_type *ret; \
- rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##last(a_rbt_type *rbtree) { \
- a_type *ret; \
- rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
- a_type *ret; \
- if (rbtn_right_get(a_type, a_field, node) != NULL) { \
- rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
- a_field, node), ret); \
- } else { \
- a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
- while (true) { \
- int cmp = (a_cmp)(node, tnode); \
- if (cmp < 0) { \
- ret = tnode; \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- break; \
- } \
- assert(tnode != NULL); \
- } \
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
- a_type *ret; \
- if (rbtn_left_get(a_type, a_field, node) != NULL) { \
- rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
- a_field, node), ret); \
- } else { \
- a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
- while (true) { \
- int cmp = (a_cmp)(node, tnode); \
- if (cmp < 0) { \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- ret = tnode; \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- break; \
- } \
- assert(tnode != NULL); \
- } \
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
- a_type *ret; \
- int cmp; \
- ret = rbtree->rbt_root; \
- while (ret != NULL \
- && (cmp = (a_cmp)(key, ret)) != 0) { \
- if (cmp < 0) { \
- ret = rbtn_left_get(a_type, a_field, ret); \
- } else { \
- ret = rbtn_right_get(a_type, a_field, ret); \
- } \
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
- a_type *ret; \
- a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
- int cmp = (a_cmp)(key, tnode); \
- if (cmp < 0) { \
- ret = tnode; \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- ret = tnode; \
- break; \
- } \
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
- a_type *ret; \
- a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
- int cmp = (a_cmp)(key, tnode); \
- if (cmp < 0) { \
- tnode = rbtn_left_get(a_type, a_field, tnode); \
- } else if (cmp > 0) { \
- ret = tnode; \
- tnode = rbtn_right_get(a_type, a_field, tnode); \
- } else { \
- ret = tnode; \
- break; \
- } \
- } \
- return ret; \
-} \
-a_attr void \
-a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
- a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
- a_prefix##path_entry_t *pathp; \
- rbt_node_new(a_type, a_field, rbtree, node); \
- /* Wind. */ \
- path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
- int cmp = pathp->cmp = a_cmp(node, pathp->node); \
- assert(cmp != 0); \
- if (cmp < 0) { \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } else { \
- pathp[1].node = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- } \
- } \
- pathp->node = node; \
- /* A loop invariant we maintain is that all nodes with */\
- /* out-of-date summaries live in path[0], path[1], ..., *pathp. */\
- /* To maintain this, we have to summarize node, since we */\
- /* decrement pathp before the first iteration. */\
- assert(rbtn_left_get(a_type, a_field, node) == NULL); \
- assert(rbtn_right_get(a_type, a_field, node) == NULL); \
- (void)a_summarize(node, NULL, NULL); \
- /* Unwind. */ \
- for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
- a_type *cnode = pathp->node; \
- if (pathp->cmp < 0) { \
- a_type *left = pathp[1].node; \
- rbtn_left_set(a_type, a_field, cnode, left); \
- if (rbtn_red_get(a_type, a_field, left)) { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
- /* Fix up 4-node. */ \
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, cnode, tnode); \
- (void)a_summarize(cnode, \
- rbtn_left_get(a_type, a_field, cnode), \
- rbtn_right_get(a_type, a_field, cnode)); \
- cnode = tnode; \
- } \
- } else { \
- a_prefix##summarize_range(path, pathp); \
- return; \
- } \
- } else { \
- a_type *right = pathp[1].node; \
- rbtn_right_set(a_type, a_field, cnode, right); \
- if (rbtn_red_get(a_type, a_field, right)) { \
- a_type *left = rbtn_left_get(a_type, a_field, cnode); \
- if (left != NULL && rbtn_red_get(a_type, a_field, \
- left)) { \
- /* Split 4-node. */ \
- rbtn_black_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, right); \
- rbtn_red_set(a_type, a_field, cnode); \
- } else { \
- /* Lean left. */ \
- a_type *tnode; \
- bool tred = rbtn_red_get(a_type, a_field, cnode); \
- rbtn_rotate_left(a_type, a_field, cnode, tnode); \
- rbtn_color_set(a_type, a_field, tnode, tred); \
- rbtn_red_set(a_type, a_field, cnode); \
- (void)a_summarize(cnode, \
- rbtn_left_get(a_type, a_field, cnode), \
- rbtn_right_get(a_type, a_field, cnode)); \
- cnode = tnode; \
- } \
- } else { \
- a_prefix##summarize_range(path, pathp); \
- return; \
- } \
- } \
- pathp->node = cnode; \
- (void)a_summarize(cnode, \
- rbtn_left_get(a_type, a_field, cnode), \
- rbtn_right_get(a_type, a_field, cnode)); \
- } \
- /* Set root, and make it black. */ \
- rbtree->rbt_root = path->node; \
- rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
-} \
-a_attr void \
-a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
- a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
- a_prefix##path_entry_t *pathp; \
- a_prefix##path_entry_t *nodep; \
- a_prefix##path_entry_t *swap_loc; \
- /* This is a "real" sentinel -- NULL means we didn't swap the */\
- /* node to be pruned with one of its successors, and so */\
- /* summarization can terminate early whenever some summary */\
- /* doesn't change. */\
- swap_loc = NULL; \
- /* This is just to silence a compiler warning. */ \
- nodep = NULL; \
- /* Wind. */ \
- path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
- int cmp = pathp->cmp = a_cmp(node, pathp->node); \
- if (cmp < 0) { \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } else { \
- pathp[1].node = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- if (cmp == 0) { \
- /* Find node's successor, in preparation for swap. */ \
- pathp->cmp = 1; \
- nodep = pathp; \
- for (pathp++; pathp->node != NULL; pathp++) { \
- pathp->cmp = -1; \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } \
- break; \
- } \
- } \
- } \
- assert(nodep->node == node); \
- pathp--; \
- if (pathp->node != node) { \
- /* Swap node with its successor. */ \
- swap_loc = nodep; \
- bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
- rbtn_color_set(a_type, a_field, pathp->node, \
- rbtn_red_get(a_type, a_field, node)); \
- rbtn_left_set(a_type, a_field, pathp->node, \
- rbtn_left_get(a_type, a_field, node)); \
- /* If node's successor is its right child, the following code */\
- /* will do the wrong thing for the right child pointer. */\
- /* However, it doesn't matter, because the pointer will be */\
- /* properly set when the successor is pruned. */\
- rbtn_right_set(a_type, a_field, pathp->node, \
- rbtn_right_get(a_type, a_field, node)); \
- rbtn_color_set(a_type, a_field, node, tred); \
- /* The pruned leaf node's child pointers are never accessed */\
- /* again, so don't bother setting them to nil. */\
- nodep->node = pathp->node; \
- pathp->node = node; \
- if (nodep == path) { \
- rbtree->rbt_root = nodep->node; \
- } else { \
- if (nodep[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, nodep[-1].node, \
- nodep->node); \
- } else { \
- rbtn_right_set(a_type, a_field, nodep[-1].node, \
- nodep->node); \
- } \
- } \
- } else { \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- if (left != NULL) { \
- /* node has no successor, but it has a left child. */\
- /* Splice node out, without losing the left child. */\
- assert(!rbtn_red_get(a_type, a_field, node)); \
- assert(rbtn_red_get(a_type, a_field, left)); \
- rbtn_black_set(a_type, a_field, left); \
- if (pathp == path) { \
- rbtree->rbt_root = left; \
- /* Nothing to summarize -- the subtree rooted at the */\
- /* node's left child hasn't changed, and it's now the */\
- /* root. */\
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- left); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- left); \
- } \
- a_prefix##summarize_swapped_range(path, &pathp[-1], \
- swap_loc); \
- } \
- return; \
- } else if (pathp == path) { \
- /* The tree only contained one node. */ \
- rbtree->rbt_root = NULL; \
- return; \
- } \
- } \
- /* We've now established the invariant that the node has no right */\
- /* child (well, morally; we didn't bother nulling it out if we */\
- /* swapped it with its successor), and that the only nodes with */\
- /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\
- if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- /* Prune red node, which requires no fixup. */ \
- assert(pathp[-1].cmp < 0); \
- rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
- a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \
- return; \
- } \
- /* The node to be pruned is black, so unwind until balance is */\
- /* restored. */\
- pathp->node = NULL; \
- for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
- assert(pathp->cmp != 0); \
- if (pathp->cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp->node, \
- pathp[1].node); \
- if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- a_type *right = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- a_type *rightleft = rbtn_left_get(a_type, a_field, \
- right); \
- a_type *tnode; \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
- /* In the following diagrams, ||, //, and \\ */\
- /* indicate the path to the removed node. */\
- /* */\
- /* || */\
- /* pathp(r) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- /* */\
- rbtn_black_set(a_type, a_field, pathp->node); \
- rbtn_rotate_right(a_type, a_field, right, tnode); \
- rbtn_right_set(a_type, a_field, pathp->node, tnode);\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(right, \
- rbtn_left_get(a_type, a_field, right), \
- rbtn_right_get(a_type, a_field, right)); \
- } else { \
- /* || */\
- /* pathp(r) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- /* */\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- } \
- (void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \
- tnode), rbtn_right_get(a_type, a_field, tnode)); \
- /* Balance restored, but rotation modified subtree */\
- /* root. */\
- assert((uintptr_t)pathp > (uintptr_t)path); \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- a_prefix##summarize_swapped_range(path, &pathp[-1], \
- swap_loc); \
- return; \
- } else { \
- a_type *right = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- a_type *rightleft = rbtn_left_get(a_type, a_field, \
- right); \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
- /* || */\
- /* pathp(b) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, rightleft); \
- rbtn_rotate_right(a_type, a_field, right, tnode); \
- rbtn_right_set(a_type, a_field, pathp->node, tnode);\
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(right, \
- rbtn_left_get(a_type, a_field, right), \
- rbtn_right_get(a_type, a_field, right)); \
- (void)a_summarize(tnode, \
- rbtn_left_get(a_type, a_field, tnode), \
- rbtn_right_get(a_type, a_field, tnode)); \
- /* Balance restored, but rotation modified */\
- /* subtree root, which may actually be the tree */\
- /* root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } \
- a_prefix##summarize_swapped_range(path, \
- &pathp[-1], swap_loc); \
- } \
- return; \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* // \ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- a_type *tnode; \
- rbtn_red_set(a_type, a_field, pathp->node); \
- rbtn_rotate_left(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(tnode, \
- rbtn_left_get(a_type, a_field, tnode), \
- rbtn_right_get(a_type, a_field, tnode)); \
- pathp->node = tnode; \
- } \
- } \
- } else { \
- a_type *left; \
- rbtn_right_set(a_type, a_field, pathp->node, \
- pathp[1].node); \
- left = rbtn_left_get(a_type, a_field, pathp->node); \
- if (rbtn_red_get(a_type, a_field, left)) { \
- a_type *tnode; \
- a_type *leftright = rbtn_right_get(a_type, a_field, \
- left); \
- a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
- leftright); \
- if (leftrightleft != NULL && rbtn_red_get(a_type, \
- a_field, leftrightleft)) { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (r) (b) */\
- /* \ */\
- /* (b) */\
- /* / */\
- /* (r) */\
- a_type *unode; \
- rbtn_black_set(a_type, a_field, leftrightleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- unode); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- rbtn_right_set(a_type, a_field, unode, tnode); \
- rbtn_rotate_left(a_type, a_field, unode, tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(unode, \
- rbtn_left_get(a_type, a_field, unode), \
- rbtn_right_get(a_type, a_field, unode)); \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (r) (b) */\
- /* \ */\
- /* (b) */\
- /* / */\
- /* (b) */\
- assert(leftright != NULL); \
- rbtn_red_set(a_type, a_field, leftright); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- rbtn_black_set(a_type, a_field, tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- } \
- (void)a_summarize(tnode, \
- rbtn_left_get(a_type, a_field, tnode), \
- rbtn_right_get(a_type, a_field, tnode)); \
- /* Balance restored, but rotation modified subtree */\
- /* root, which may actually be the tree root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- a_prefix##summarize_swapped_range(path, &pathp[-1], \
- swap_loc); \
- } \
- return; \
- } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
- /* || */\
- /* pathp(r) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, pathp->node); \
- rbtn_red_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(tnode, \
- rbtn_left_get(a_type, a_field, tnode), \
- rbtn_right_get(a_type, a_field, tnode)); \
- /* Balance restored, but rotation modified */\
- /* subtree root. */\
- assert((uintptr_t)pathp > (uintptr_t)path); \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, pathp[-1].node, \
- tnode); \
- } \
- a_prefix##summarize_swapped_range(path, &pathp[-1], \
- swap_loc); \
- return; \
- } else { \
- /* || */\
- /* pathp(r) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- rbtn_red_set(a_type, a_field, left); \
- rbtn_black_set(a_type, a_field, pathp->node); \
- /* Balance restored. */ \
- a_prefix##summarize_swapped_range(path, pathp, \
- swap_loc); \
- return; \
- } \
- } else { \
- a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (r) */\
- a_type *tnode; \
- rbtn_black_set(a_type, a_field, leftleft); \
- rbtn_rotate_right(a_type, a_field, pathp->node, \
- tnode); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- (void)a_summarize(tnode, \
- rbtn_left_get(a_type, a_field, tnode), \
- rbtn_right_get(a_type, a_field, tnode)); \
- /* Balance restored, but rotation modified */\
- /* subtree root, which may actually be the tree */\
- /* root. */\
- if (pathp == path) { \
- /* Set root. */ \
- rbtree->rbt_root = tnode; \
- } else { \
- if (pathp[-1].cmp < 0) { \
- rbtn_left_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } else { \
- rbtn_right_set(a_type, a_field, \
- pathp[-1].node, tnode); \
- } \
- a_prefix##summarize_swapped_range(path, \
- &pathp[-1], swap_loc); \
- } \
- return; \
- } else { \
- /* || */\
- /* pathp(b) */\
- /* / \\ */\
- /* (b) (b) */\
- /* / */\
- /* (b) */\
- rbtn_red_set(a_type, a_field, left); \
- (void)a_summarize(pathp->node, \
- rbtn_left_get(a_type, a_field, pathp->node), \
- rbtn_right_get(a_type, a_field, pathp->node)); \
- } \
- } \
- } \
- } \
- /* Set root. */ \
- rbtree->rbt_root = path->node; \
- assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
-} \
-a_attr a_type * \
-a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return NULL; \
- } else { \
- a_type *ret; \
- if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
- a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
- arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
- } \
-} \
-a_attr a_type * \
-a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- int cmp = a_cmp(start, node); \
- if (cmp < 0) { \
- a_type *ret; \
- if ((ret = a_prefix##iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
- } else if (cmp > 0) { \
- return a_prefix##iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg); \
- } else { \
- a_type *ret; \
- if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
- a_field, node), cb, arg); \
- } \
-} \
-a_attr a_type * \
-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
- a_rbt_type *, a_type *, void *), void *arg) { \
- a_type *ret; \
- if (start != NULL) { \
- ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
- cb, arg); \
- } else { \
- ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return NULL; \
- } else { \
- a_type *ret; \
- if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
- } \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
- a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
- void *arg) { \
- int cmp = a_cmp(start, node); \
- if (cmp > 0) { \
- a_type *ret; \
- if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
- } else if (cmp < 0) { \
- return a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
- } else { \
- a_type *ret; \
- if ((ret = cb(rbtree, node, arg)) != NULL) { \
- return ret; \
- } \
- return a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_left_get(a_type, a_field, node), cb, arg); \
- } \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- a_type *ret; \
- if (start != NULL) { \
- ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtree->rbt_root, cb, arg); \
- } else { \
- ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
- cb, arg); \
- } \
- return ret; \
-} \
-a_attr void \
-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
- a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return; \
- } \
- a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_left_set(a_type, a_field, (node), NULL); \
- a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_right_set(a_type, a_field, (node), NULL); \
- if (cb) { \
- cb(node, arg); \
- } \
-} \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg) { \
- a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
- rbtree->rbt_root = NULL; \
-} \
-/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \
-rb_summarized_only_##a_is_summarized( \
-static inline a_prefix##path_entry_t * \
-a_prefix##wind(a_rbt_type *rbtree, \
- a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \
- a_prefix##path_entry_t *pathp; \
- path->node = rbtree->rbt_root; \
- for (pathp = path; ; pathp++) { \
- assert((size_t)(pathp - path) < RB_MAX_DEPTH); \
- pathp->cmp = a_cmp(node, pathp->node); \
- if (pathp->cmp < 0) { \
- pathp[1].node = rbtn_left_get(a_type, a_field, \
- pathp->node); \
- } else if (pathp->cmp == 0) { \
- return pathp; \
- } else { \
- pathp[1].node = rbtn_right_get(a_type, a_field, \
- pathp->node); \
- } \
- } \
- unreachable(); \
-} \
-a_attr void \
-a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \
- a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
- a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \
- a_prefix##summarize_range(path, pathp); \
-} \
-a_attr bool \
-a_prefix##empty_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *node = rbtree->rbt_root; \
- return node == NULL || !filter_subtree(filter_ctx, node); \
-} \
-static inline a_type * \
-a_prefix##first_filtered_from_node(a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- assert(node != NULL && filter_subtree(filter_ctx, node)); \
- while (true) { \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- if (left != NULL && filter_subtree(filter_ctx, left)) { \
- node = left; \
- } else if (filter_node(filter_ctx, node)) { \
- return node; \
- } else { \
- assert(right != NULL \
- && filter_subtree(filter_ctx, right)); \
- node = right; \
- } \
- } \
- unreachable(); \
-} \
-a_attr a_type * \
-a_prefix##first_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *node = rbtree->rbt_root; \
- if (node == NULL || !filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- return a_prefix##first_filtered_from_node(node, filter_node, \
- filter_subtree, filter_ctx); \
-} \
-static inline a_type * \
-a_prefix##last_filtered_from_node(a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- assert(node != NULL && filter_subtree(filter_ctx, node)); \
- while (true) { \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- if (right != NULL && filter_subtree(filter_ctx, right)) { \
- node = right; \
- } else if (filter_node(filter_ctx, node)) { \
- return node; \
- } else { \
- assert(left != NULL \
- && filter_subtree(filter_ctx, left)); \
- node = left; \
- } \
- } \
- unreachable(); \
-} \
-a_attr a_type * \
-a_prefix##last_filtered(a_rbt_type *rbtree, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *node = rbtree->rbt_root; \
- if (node == NULL || !filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- return a_prefix##last_filtered_from_node(node, filter_node, \
- filter_subtree, filter_ctx); \
-} \
-/* Internal implementation function. Search for a node comparing */\
-/* equal to key and matching the filter. If such a node is in the tree, */\
-/* return it. Additionally, the caller has the option to ask for */\
-/* bounds on the next / prev node in the tree passing the filter. */\
-/* If nextbound is true, then this function will do one of the */\
-/* following: */\
-/* - Fill in *nextbound_node with the smallest node in the tree */\
-/* greater than key passing the filter, and NULL-out */\
-/* *nextbound_subtree. */\
-/* - Fill in *nextbound_subtree with a parent of that node which is */\
-/* not a parent of the searched-for node, and NULL-out */\
-/* *nextbound_node. */\
-/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */\
-/* case no node greater than key but passing the filter is in the */\
-/* tree. */\
-/* The prevbound case is similar. If the caller knows that key is in */\
-/* the tree and that the subtree rooted at key does not contain a */\
-/* node satisfying the bound being searched for, then they can pass */\
-/* false for include_subtree, in which case we won't bother searching */\
-/* there (risking a cache miss). */\
-/* */\
-/* This API is unfortunately complex; but the logic for filtered */\
-/* searches is very subtle, and otherwise we would have to repeat it */\
-/* multiple times for filtered search, nsearch, psearch, next, and */\
-/* prev. */\
-static inline a_type * \
-a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \
- const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx, \
- bool include_subtree, \
- bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \
- bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
- if (nextbound) { \
- *nextbound_node = NULL; \
- *nextbound_subtree = NULL; \
- } \
- if (prevbound) { \
- *prevbound_node = NULL; \
- *prevbound_subtree = NULL; \
- } \
- a_type *tnode = rbtree->rbt_root; \
- while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \
- int cmp = a_cmp(key, tnode); \
- a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \
- a_type *tright = rbtn_right_get(a_type, a_field, tnode); \
- if (cmp < 0) { \
- if (nextbound) { \
- if (filter_node(filter_ctx, tnode)) { \
- *nextbound_node = tnode; \
- *nextbound_subtree = NULL; \
- } else if (tright != NULL && filter_subtree( \
- filter_ctx, tright)) { \
- *nextbound_node = NULL; \
- *nextbound_subtree = tright; \
- } \
- } \
- tnode = tleft; \
- } else if (cmp > 0) { \
- if (prevbound) { \
- if (filter_node(filter_ctx, tnode)) { \
- *prevbound_node = tnode; \
- *prevbound_subtree = NULL; \
- } else if (tleft != NULL && filter_subtree( \
- filter_ctx, tleft)) { \
- *prevbound_node = NULL; \
- *prevbound_subtree = tleft; \
- } \
- } \
- tnode = tright; \
- } else { \
- if (filter_node(filter_ctx, tnode)) { \
- return tnode; \
- } \
- if (include_subtree) { \
- if (prevbound && tleft != NULL && filter_subtree( \
- filter_ctx, tleft)) { \
- *prevbound_node = NULL; \
- *prevbound_subtree = tleft; \
- } \
- if (nextbound && tright != NULL && filter_subtree( \
- filter_ctx, tright)) { \
- *nextbound_node = NULL; \
- *nextbound_subtree = tright; \
- } \
- } \
- return NULL; \
- } \
- } \
- return NULL; \
-} \
-a_attr a_type * \
-a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *nright = rbtn_right_get(a_type, a_field, node); \
- if (nright != NULL && filter_subtree(filter_ctx, nright)) { \
- return a_prefix##first_filtered_from_node(nright, filter_node, \
- filter_subtree, filter_ctx); \
- } \
- a_type *node_candidate; \
- a_type *subtree_candidate; \
- a_type *search_result = a_prefix##search_with_filter_bounds( \
- rbtree, node, filter_node, filter_subtree, filter_ctx, \
- /* include_subtree */ false, \
- /* nextbound */ true, &node_candidate, &subtree_candidate, \
- /* prevbound */ false, NULL, NULL); \
- assert(node == search_result \
- || !filter_node(filter_ctx, node)); \
- if (node_candidate != NULL) { \
- return node_candidate; \
- } \
- if (subtree_candidate != NULL) { \
- return a_prefix##first_filtered_from_node( \
- subtree_candidate, filter_node, filter_subtree, \
- filter_ctx); \
- } \
- return NULL; \
-} \
-a_attr a_type * \
-a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *nleft = rbtn_left_get(a_type, a_field, node); \
- if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \
- return a_prefix##last_filtered_from_node(nleft, filter_node, \
- filter_subtree, filter_ctx); \
- } \
- a_type *node_candidate; \
- a_type *subtree_candidate; \
- a_type *search_result = a_prefix##search_with_filter_bounds( \
- rbtree, node, filter_node, filter_subtree, filter_ctx, \
- /* include_subtree */ false, \
- /* nextbound */ false, NULL, NULL, \
- /* prevbound */ true, &node_candidate, &subtree_candidate); \
- assert(node == search_result \
- || !filter_node(filter_ctx, node)); \
- if (node_candidate != NULL) { \
- return node_candidate; \
- } \
- if (subtree_candidate != NULL) { \
- return a_prefix##last_filtered_from_node( \
- subtree_candidate, filter_node, filter_subtree, \
- filter_ctx); \
- } \
- return NULL; \
-} \
-a_attr a_type * \
-a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
- filter_node, filter_subtree, filter_ctx, \
- /* include_subtree */ false, \
- /* nextbound */ false, NULL, NULL, \
- /* prevbound */ false, NULL, NULL); \
- return result; \
-} \
-a_attr a_type * \
-a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *node_candidate; \
- a_type *subtree_candidate; \
- a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
- filter_node, filter_subtree, filter_ctx, \
- /* include_subtree */ true, \
- /* nextbound */ true, &node_candidate, &subtree_candidate, \
- /* prevbound */ false, NULL, NULL); \
- if (result != NULL) { \
- return result; \
- } \
- if (node_candidate != NULL) { \
- return node_candidate; \
- } \
- if (subtree_candidate != NULL) { \
- return a_prefix##first_filtered_from_node( \
- subtree_candidate, filter_node, filter_subtree, \
- filter_ctx); \
- } \
- return NULL; \
-} \
-a_attr a_type * \
-a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *node_candidate; \
- a_type *subtree_candidate; \
- a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
- filter_node, filter_subtree, filter_ctx, \
- /* include_subtree */ true, \
- /* nextbound */ false, NULL, NULL, \
- /* prevbound */ true, &node_candidate, &subtree_candidate); \
- if (result != NULL) { \
- return result; \
- } \
- if (node_candidate != NULL) { \
- return node_candidate; \
- } \
- if (subtree_candidate != NULL) { \
- return a_prefix##last_filtered_from_node( \
- subtree_candidate, filter_node, filter_subtree, \
- filter_ctx); \
- } \
- return NULL; \
-} \
-a_attr a_type * \
-a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- if (node == NULL || !filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- a_type *ret; \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \
- filter_node, filter_subtree, filter_ctx); \
- if (ret != NULL) { \
- return ret; \
- } \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- } \
- if (ret != NULL) { \
- return ret; \
- } \
- return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
- filter_node, filter_subtree, filter_ctx); \
-} \
-a_attr a_type * \
-a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \
- a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
- void *arg, bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- if (!filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- int cmp = a_cmp(start, node); \
- a_type *ret; \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- if (cmp < 0) { \
- ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \
- arg, filter_node, filter_subtree, filter_ctx); \
- if (ret != NULL) { \
- return ret; \
- } \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- if (ret != NULL) { \
- return ret; \
- } \
- } \
- return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
- filter_node, filter_subtree, filter_ctx); \
- } else if (cmp > 0) { \
- return a_prefix##iter_start_filtered(rbtree, start, right, \
- cb, arg, filter_node, filter_subtree, filter_ctx); \
- } else { \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- if (ret != NULL) { \
- return ret; \
- } \
- } \
- return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
- filter_node, filter_subtree, filter_ctx); \
- } \
-} \
-a_attr a_type * \
-a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *ret; \
- if (start != NULL) { \
- ret = a_prefix##iter_start_filtered(rbtree, start, \
- rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
- filter_ctx); \
- } else { \
- ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \
- cb, arg, filter_node, filter_subtree, filter_ctx); \
- } \
- return ret; \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \
- a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
- void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- if (node == NULL || !filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- a_type *ret; \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \
- arg, filter_node, filter_subtree, filter_ctx); \
- if (ret != NULL) { \
- return ret; \
- } \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- } \
- if (ret != NULL) { \
- return ret; \
- } \
- return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \
- arg, filter_node, filter_subtree, filter_ctx); \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
- a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
- void *arg, bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- if (!filter_subtree(filter_ctx, node)) { \
- return NULL; \
- } \
- int cmp = a_cmp(start, node); \
- a_type *ret; \
- a_type *left = rbtn_left_get(a_type, a_field, node); \
- a_type *right = rbtn_right_get(a_type, a_field, node); \
- if (cmp > 0) { \
- ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
- right, cb, arg, filter_node, filter_subtree, filter_ctx); \
- if (ret != NULL) { \
- return ret; \
- } \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- if (ret != NULL) { \
- return ret; \
- } \
- } \
- return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
- arg, filter_node, filter_subtree, filter_ctx); \
- } else if (cmp < 0) { \
- return a_prefix##reverse_iter_start_filtered(rbtree, start, \
- left, cb, arg, filter_node, filter_subtree, filter_ctx); \
- } else { \
- if (filter_node(filter_ctx, node)) { \
- ret = cb(rbtree, node, arg); \
- if (ret != NULL) { \
- return ret; \
- } \
- } \
- return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
- arg, filter_node, filter_subtree, filter_ctx); \
- } \
-} \
-a_attr a_type * \
-a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
- bool (*filter_node)(void *, a_type *), \
- bool (*filter_subtree)(void *, a_type *), \
- void *filter_ctx) { \
- a_type *ret; \
- if (start != NULL) { \
- ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
- rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
- filter_ctx); \
- } else { \
- ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \
- rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
- filter_ctx); \
- } \
- return ret; \
-} \
-) /* end rb_summarized_only */
-
-#endif /* JEMALLOC_INTERNAL_RB_H */
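
The filtered-search entry points above all take the same pair of callbacks, so it may help to see a hypothetical instantiation (the node type, its summary field, and the threshold context below are invented for illustration and are not part of rb.h). The key contract is that filter_subtree must return true whenever any node in that subtree could pass filter_node; that is what makes pruning whole branches sound.

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical summarized node: summary_max is assumed to be maintained by
     * the a_summarize callback as the maximum value anywhere in the subtree. */
    typedef struct my_node_s {
        size_t value;
        size_t summary_max;
        /* rb_node link omitted in this standalone sketch */
    } my_node_t;

    typedef struct { size_t min_value; } my_filter_ctx_t;

    /* Passes individual nodes whose value meets the threshold. */
    static bool my_filter_node(void *ctx, my_node_t *node) {
        return node->value >= ((my_filter_ctx_t *)ctx)->min_value;
    }

    /* Passes subtrees that could still contain such a node; pruning is only
     * correct because summary_max bounds every value in the subtree. */
    static bool my_filter_subtree(void *ctx, my_node_t *node) {
        return node->summary_max >= ((my_filter_ctx_t *)ctx)->min_value;
    }

    int main(void) {
        my_node_t n = { .value = 7, .summary_max = 9 };
        my_filter_ctx_t ctx = { .min_value = 8 };
        /* The node itself fails the filter, yet its subtree may hold a match. */
        return (!my_filter_node(&ctx, &n) && my_filter_subtree(&ctx, &n)) ? 0 : 1;
    }
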
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree.h
deleted file mode 100644
index a00adb29..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree.h
+++ /dev/null
@@ -1,554 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_H
-#define JEMALLOC_INTERNAL_RTREE_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree_tsd.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/tsd.h"
-
-/*
- * This radix tree implementation is tailored to the singular purpose of
- * associating metadata with extents that are currently owned by jemalloc.
- *
- *******************************************************************************
- */
-
-/* Number of high insignificant bits. */
-#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
-/* Number of low insignificant bits. */
-#define RTREE_NLIB LG_PAGE
-/* Number of significant bits. */
-#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
-/* Number of levels in radix tree. */
-#if RTREE_NSB <= 10
-# define RTREE_HEIGHT 1
-#elif RTREE_NSB <= 36
-# define RTREE_HEIGHT 2
-#elif RTREE_NSB <= 52
-# define RTREE_HEIGHT 3
-#else
-# error Unsupported number of significant virtual address bits
-#endif
-/* Use compact leaf representation if virtual address encoding allows. */
-#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
-# define RTREE_LEAF_COMPACT
-#endif
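
The height selection above is plain arithmetic on the address-space constants. A standalone sketch, not part of jemalloc, that recomputes it for assumed values LG_SIZEOF_PTR = 3, LG_VADDR = 48 and LG_PAGE = 12 (a typical 64-bit Linux configuration):

    #include <stdio.h>

    int main(void) {
        /* Assumed platform constants; real builds take these from configure. */
        const unsigned lg_sizeof_ptr = 3, lg_vaddr = 48, lg_page = 12;
        unsigned nhib = (1U << (lg_sizeof_ptr + 3)) - lg_vaddr; /* high insignificant: 16 */
        unsigned nlib = lg_page;                                /* low insignificant: 12 */
        unsigned nsb = lg_vaddr - nlib;                         /* significant: 36 */
        unsigned height = nsb <= 10 ? 1 : (nsb <= 36 ? 2 : 3);  /* -> 2 levels */
        printf("NHIB=%u NLIB=%u NSB=%u HEIGHT=%u\n", nhib, nlib, nsb, height);
        return 0;
    }
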
-
-typedef struct rtree_node_elm_s rtree_node_elm_t;
-struct rtree_node_elm_s {
- atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
-};
-
-typedef struct rtree_metadata_s rtree_metadata_t;
-struct rtree_metadata_s {
- szind_t szind;
- extent_state_t state; /* Mirrors edata->state. */
- bool is_head; /* Mirrors edata->is_head. */
- bool slab;
-};
-
-typedef struct rtree_contents_s rtree_contents_t;
-struct rtree_contents_s {
- edata_t *edata;
- rtree_metadata_t metadata;
-};
-
-#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
-#define RTREE_LEAF_STATE_SHIFT 2
-#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
-
-struct rtree_leaf_elm_s {
-#ifdef RTREE_LEAF_COMPACT
- /*
- * Single pointer-width field containing all of the leaf element fields.
- * For example, on a 64-bit x64 system with 48 significant virtual
- * memory address bits, the size-class index, edata, state, is_head, and
- * slab fields are packed as follows:
- *
- * x: index
- * e: edata
- * s: state
- * h: is_head
- * b: slab
- *
- * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
- */
- atomic_p_t le_bits;
-#else
- atomic_p_t le_edata; /* (edata_t *) */
- /*
- * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
- */
- atomic_u_t le_metadata;
-#endif
-};
-
-typedef struct rtree_level_s rtree_level_t;
-struct rtree_level_s {
- /* Number of key bits distinguished by this level. */
- unsigned bits;
- /*
- * Cumulative number of key bits distinguished by traversing to
- * corresponding tree level.
- */
- unsigned cumbits;
-};
-
-typedef struct rtree_s rtree_t;
-struct rtree_s {
- base_t *base;
- malloc_mutex_t init_lock;
- /* Number of elements based on rtree_levels[0].bits. */
-#if RTREE_HEIGHT > 1
- rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
-#else
- rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
-#endif
-};
-
-/*
- * Split the bits into one to three partitions depending on the number of
- * significant bits. If the number of bits does not divide evenly into the
- * number of levels, place one remainder bit per level starting at the leaf
- * level.
- */
-static const rtree_level_t rtree_levels[] = {
-#if RTREE_HEIGHT == 1
- {RTREE_NSB, RTREE_NHIB + RTREE_NSB}
-#elif RTREE_HEIGHT == 2
- {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
- {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
-#elif RTREE_HEIGHT == 3
- {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
- {RTREE_NSB/3 + RTREE_NSB%3/2,
- RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
- {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
-#else
-# error Unsupported rtree height
-#endif
-};
-
-bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
-
-rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
- rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-
-JEMALLOC_ALWAYS_INLINE unsigned
-rtree_leaf_maskbits(void) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
- rtree_levels[RTREE_HEIGHT-1].bits);
- return ptrbits - cumbits;
-}
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leafkey(uintptr_t key) {
- uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
- return (key & mask);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-rtree_cache_direct_map(uintptr_t key) {
- return (size_t)((key >> rtree_leaf_maskbits()) &
- (RTREE_CTX_NCACHE - 1));
-}
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_subkey(uintptr_t key, unsigned level) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = rtree_levels[level].cumbits;
- unsigned shiftbits = ptrbits - cumbits;
- unsigned maskbits = rtree_levels[level].bits;
- uintptr_t mask = (ZU(1) << maskbits) - 1;
- return ((key >> shiftbits) & mask);
-}
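
A concrete illustration of the shift/mask arithmetic in rtree_subkey(): the standalone sketch below, not jemalloc code, splits an assumed 64-bit key into the two per-level subkeys of a height-2 tree whose levels are {bits = 18, cumbits = 34} and {bits = 18, cumbits = 52} (the values implied by NSB = 36 and NHIB = 16).

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Example levels for an assumed NSB = 36, NHIB = 16 configuration. */
        const struct { unsigned bits, cumbits; } lv[2] = { {18, 34}, {18, 52} };
        const unsigned ptrbits = 64;
        uintptr_t key = (uintptr_t)0x00007f1234567000ULL; /* arbitrary page-aligned address */
        for (unsigned i = 0; i < 2; i++) {
            unsigned shift = ptrbits - lv[i].cumbits;
            uintptr_t mask = ((uintptr_t)1 << lv[i].bits) - 1;
            printf("level %u subkey = %#lx\n", i,
                (unsigned long)((key >> shift) & mask));
        }
        return 0;
    }
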
-
-/*
- * Atomic getters.
- *
- * dependent: Reading a value on behalf of a pointer to a valid allocation
- * is guaranteed to be a clean read even without synchronization,
- * because the rtree update became visible in memory before the
- * pointer came into existence.
- * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
- * dependent on a previous rtree write, which means a stale read
- * could result if synchronization were omitted here.
- */
-# ifdef RTREE_LEAF_COMPACT
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
- return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
- ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
-}
-
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
- assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
- uintptr_t edata_bits = (uintptr_t)contents.edata
- & (((uintptr_t)1 << LG_VADDR) - 1);
-
- uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
- uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
- uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
- uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
- RTREE_LEAF_STATE_SHIFT;
- uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
- slab_bits;
- assert((edata_bits & metadata_bits) == 0);
-
- return edata_bits | metadata_bits;
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_contents_t
-rtree_leaf_elm_bits_decode(uintptr_t bits) {
- rtree_contents_t contents;
- /* Do the easy things first. */
- contents.metadata.szind = bits >> LG_VADDR;
- contents.metadata.slab = (bool)(bits & 1);
- contents.metadata.is_head = (bool)(bits & (1 << 1));
-
- uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
- RTREE_LEAF_STATE_SHIFT;
- assert(state_bits <= extent_state_max);
- contents.metadata.state = (extent_state_t)state_bits;
-
- uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
-# ifdef __aarch64__
- /*
- * aarch64 doesn't sign extend the highest virtual address bit to set
- * the higher ones. Instead, the high bits get zeroed.
- */
- uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
- /* Mask off metadata. */
- uintptr_t mask = high_bit_mask & low_bit_mask;
- contents.edata = (edata_t *)(bits & mask);
-# else
- /* Restore sign-extended high bits, mask metadata bits. */
- contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
- >> RTREE_NHIB) & low_bit_mask);
-# endif
- assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
- return contents;
-}
-
-# endif /* RTREE_LEAF_COMPACT */
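
To make the compact encoding easier to follow in isolation, here is a minimal tag-in-pointer sketch: metadata packed into low bits that an aligned pointer cannot use, plus a round-trip check. The field widths and positions are illustrative only and do not reproduce jemalloc's exact layout (in particular, the real encoding also stores szind above the address bits).

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assume the pointed-to objects are at least 64-byte aligned, freeing the
     * low 6 bits of the pointer for tags. */
    static uintptr_t pack(void *obj, unsigned state, bool is_head, bool slab) {
        assert(((uintptr_t)obj & 0x3f) == 0);
        return (uintptr_t)obj | ((uintptr_t)state << 2) |
            ((uintptr_t)is_head << 1) | (uintptr_t)slab;
    }

    static void *unpack(uintptr_t bits, unsigned *state, bool *is_head, bool *slab) {
        *slab = (bits & 1) != 0;
        *is_head = (bits & 2) != 0;
        *state = (unsigned)((bits >> 2) & 0x7);
        return (void *)(bits & ~(uintptr_t)0x3f);
    }

    int main(void) {
        _Alignas(64) static unsigned char obj[64];
        uintptr_t bits = pack(obj, /* state */ 1, /* is_head */ true, /* slab */ false);
        unsigned state; bool is_head, slab;
        void *p = unpack(bits, &state, &is_head, &slab);
        printf("round-trip ok: %d\n", p == (void *)obj && state == 1 && is_head && !slab);
        return 0;
    }
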
-
-JEMALLOC_ALWAYS_INLINE rtree_contents_t
-rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
- bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
- return contents;
-#else
- rtree_contents_t contents;
- unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
- ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
- contents.metadata.slab = (bool)(metadata_bits & 1);
- contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
-
- uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
- RTREE_LEAF_STATE_SHIFT;
- assert(state_bits <= extent_state_max);
- contents.metadata.state = (extent_state_t)state_bits;
- contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
- RTREE_LEAF_STATE_WIDTH);
-
- contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
- ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
-
- return contents;
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-rtree_contents_encode(rtree_contents_t contents, void **bits,
- unsigned *additional) {
-#ifdef RTREE_LEAF_COMPACT
- *bits = (void *)rtree_leaf_elm_bits_encode(contents);
-#else
- *additional = (unsigned)contents.metadata.slab
- | ((unsigned)contents.metadata.is_head << 1)
- | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
- | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
- RTREE_LEAF_STATE_WIDTH));
- *bits = contents.edata;
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
-#ifdef RTREE_LEAF_COMPACT
- atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
-#else
- atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
- /*
- * Write edata last, since the element is atomically considered valid
- * as soon as the edata field is non-NULL.
- */
- atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, rtree_contents_t contents) {
- assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
- void *bits;
- unsigned additional;
-
- rtree_contents_encode(contents, &bits, &additional);
- rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
-}
-
-/* The state field can be updated independently (and more frequently). */
-JEMALLOC_ALWAYS_INLINE void
-rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
- assert(elm1 != NULL);
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
- /* dependent */ true);
- bits &= ~RTREE_LEAF_STATE_MASK;
- bits |= state << RTREE_LEAF_STATE_SHIFT;
- atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
- if (elm2 != NULL) {
- atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
- }
-#else
- unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
- bits &= ~RTREE_LEAF_STATE_MASK;
- bits |= state << RTREE_LEAF_STATE_SHIFT;
- atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
- if (elm2 != NULL) {
- atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
- }
-#endif
-}
-
-/*
- * Tries to look up the key in the L1 cache, returning false on a hit and
- * true on a miss.
- * The key is allowed to be 0 (a NULL address); the lookup returns true in that case.
- */
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, rtree_leaf_elm_t **elm) {
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
- return true;
- }
-
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- *elm = &leaf[subkey];
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
-rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, bool init_missing) {
- assert(key != 0);
- assert(!dependent || !init_missing);
-
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- /* Fast path: L1 direct mapped cache. */
- if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- return &leaf[subkey];
- }
- /*
- * Search the L2 LRU cache. On hit, swap the matching element into the
- * slot in L1 cache, and move the position in L2 up by 1.
- */
-#define RTREE_CACHE_CHECK_L2(i) do { \
- if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \
- rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \
- assert(leaf != NULL); \
- if (i > 0) { \
- /* Bubble up by one. */ \
- rtree_ctx->l2_cache[i].leafkey = \
- rtree_ctx->l2_cache[i - 1].leafkey; \
- rtree_ctx->l2_cache[i].leaf = \
- rtree_ctx->l2_cache[i - 1].leaf; \
- rtree_ctx->l2_cache[i - 1].leafkey = \
- rtree_ctx->cache[slot].leafkey; \
- rtree_ctx->l2_cache[i - 1].leaf = \
- rtree_ctx->cache[slot].leaf; \
- } else { \
- rtree_ctx->l2_cache[0].leafkey = \
- rtree_ctx->cache[slot].leafkey; \
- rtree_ctx->l2_cache[0].leaf = \
- rtree_ctx->cache[slot].leaf; \
- } \
- rtree_ctx->cache[slot].leafkey = leafkey; \
- rtree_ctx->cache[slot].leaf = leaf; \
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \
- return &leaf[subkey]; \
- } \
-} while (0)
- /* Check the first cache entry. */
- RTREE_CACHE_CHECK_L2(0);
- /* Search the remaining cache elements. */
- for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
- RTREE_CACHE_CHECK_L2(i);
- }
-#undef RTREE_CACHE_CHECK_L2
-
- return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
- dependent, init_missing);
-}
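
The interplay of the two caches is easier to see stripped of the rtree specifics. The sketch below is illustrative only, with invented types and sizes; it implements the same policy as the code above: a direct-mapped L1 probe, then a linear scan of a small L2 array that bubbles a hit up by one slot and promotes it into L1, demoting the previous L1 occupant into L2.

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uintptr_t key; void *leaf; } cache_elm_t;

    #define L1_N 16 /* direct mapped */
    #define L2_N 8  /* LRU, linear scan */

    typedef struct {
        cache_elm_t l1[L1_N];
        cache_elm_t l2[L2_N];
    } cache_t;

    /* Returns the cached leaf for key, or NULL on a miss (the caller would then
     * fall back to a slow tree walk and refill the cache). */
    static void *cache_lookup(cache_t *c, uintptr_t key) {
        size_t slot = (key >> 12) & (L1_N - 1); /* direct map on some key bits */
        if (c->l1[slot].key == key) {
            return c->l1[slot].leaf;
        }
        for (size_t i = 0; i < L2_N; i++) {
            if (c->l2[i].key != key) {
                continue;
            }
            void *leaf = c->l2[i].leaf;
            cache_elm_t evicted = c->l1[slot]; /* demote the old L1 entry */
            if (i > 0) {
                c->l2[i] = c->l2[i - 1]; /* bubble the hit up by one */
                c->l2[i - 1] = evicted;
            } else {
                c->l2[0] = evicted;
            }
            c->l1[slot].key = key; /* promote the hit into L1 */
            c->l1[slot].leaf = leaf;
            return leaf;
        }
        return NULL;
    }

    int main(void) {
        static cache_t c; /* zero-initialized; key 0 means "empty" in this sketch */
        return cache_lookup(&c, (uintptr_t)0x1234000) == NULL ? 0 : 1;
    }
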
-
-/*
- * Returns true on lookup failure.
- */
-static inline bool
-rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, rtree_contents_t *r_contents) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, /* dependent */ false, /* init_missing */ false);
- if (elm == NULL) {
- return true;
- }
- *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
- /* dependent */ false);
- return false;
-}
-
-static inline rtree_contents_t
-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, /* dependent */ true, /* init_missing */ false);
- assert(elm != NULL);
- return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
-}
-
-static inline rtree_metadata_t
-rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, /* dependent */ true, /* init_missing */ false);
- assert(elm != NULL);
- return rtree_leaf_elm_read(tsdn, rtree, elm,
- /* dependent */ true).metadata;
-}
-
-/*
- * Returns true when the request cannot be fulfilled by the fast path.
- */
-static inline bool
-rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
- rtree_leaf_elm_t *elm;
- /*
- * Check the bool return value (lookup success or not) instead of
- * elm == NULL, which would cost an extra branch: when the cache lookup
- * succeeds, the returned pointer is never NULL, but the compiler cannot
- * know that.
- */
- if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
- return true;
- }
- assert(elm != NULL);
- *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
- /* dependent */ true).metadata;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
- assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
- /*
- * Only used for emap_(de)register_interior, which implies the
- * boundaries have been registered already. Therefore all the lookups
- * are dependent w/o init_missing, assuming the range spans across at
- * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
- */
- void *bits;
- unsigned additional;
- rtree_contents_encode(contents, &bits, &additional);
-
- rtree_leaf_elm_t *elm = NULL; /* Dead store. */
- for (uintptr_t addr = base; addr <= end; addr += PAGE) {
- if (addr == base ||
- (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
- elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
- /* dependent */ true, /* init_missing */ false);
- assert(elm != NULL);
- }
- assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
- /* dependent */ true, /* init_missing */ false));
- assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
- /* dependent */ true).edata != NULL);
- rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
- elm++;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t base, uintptr_t end, rtree_contents_t contents) {
- rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
- /* clearing */ false);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- rtree_contents_t contents) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, /* dependent */ false, /* init_missing */ true);
- if (elm == NULL) {
- return true;
- }
-
- rtree_leaf_elm_write(tsdn, rtree, elm, contents);
-
- return false;
-}
-
-static inline void
-rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, /* dependent */ true, /* init_missing */ false);
- assert(elm != NULL);
- assert(rtree_leaf_elm_read(tsdn, rtree, elm,
- /* dependent */ true).edata != NULL);
- rtree_contents_t contents;
- contents.edata = NULL;
- contents.metadata.szind = SC_NSIZES;
- contents.metadata.slab = false;
- contents.metadata.is_head = false;
- contents.metadata.state = (extent_state_t)0;
- rtree_leaf_elm_write(tsdn, rtree, elm, contents);
-}
-
-static inline void
-rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t base, uintptr_t end) {
- rtree_contents_t contents;
- contents.edata = NULL;
- contents.metadata.szind = SC_NSIZES;
- contents.metadata.slab = false;
- contents.metadata.is_head = false;
- contents.metadata.state = (extent_state_t)0;
- rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
- /* clearing */ true);
-}
-
-#endif /* JEMALLOC_INTERNAL_RTREE_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree_tsd.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree_tsd.h
deleted file mode 100644
index e45525c5..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/rtree_tsd.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
-#define JEMALLOC_INTERNAL_RTREE_CTX_H
-
-/*
- * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively. Each
- * entry supports an entire leaf, so the cache hit rate is typically high even
- * with a small number of entries. In rare cases extent activity will straddle
- * the boundary between two leaf nodes. Furthermore, an arena may use a
- * combination of dss and mmap. Note that as memory usage grows past the amount
- * that this cache can directly cover, the cache will become less effective if
- * locality of reference is low, but the consequence is merely cache misses
- * while traversing the tree nodes.
- *
- * The L1 direct-mapped cache offers consistent, low cost on a cache hit.
- * However, collisions can hurt the hit rate. This is resolved by combining it
- * with an L2 LRU cache, which requires a linear search and re-ordering on
- * access but suffers no collisions. Note that the caches themselves incur CPU
- * cache misses if made overly large, plus the cost of the linear search in the
- * LRU cache.
- */
-#define RTREE_CTX_NCACHE 16
-#define RTREE_CTX_NCACHE_L2 8
-
-/* Needed for initialization only. */
-#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
-#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
-
-#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
-#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
-#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
-#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
-#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
-
-#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
-#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)
-
-/*
- * Static initializer (to invalidate the cache entries) is required because the
- * free fastpath may access the rtree cache before a full tsd initialization.
- */
-#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
- {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}
-
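
With the values defined above (RTREE_CTX_NCACHE == 16, RTREE_CTX_NCACHE_L2 == 8), the element macros expand roughly as sketched below; the doubling chain exists because the preprocessor cannot loop, and the extra _RTREE_CTX_INIT_ELM_DATA indirection forces RTREE_CTX_NCACHE to be expanded to 16 before token pasting.

    RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)
        -> RTREE_CTX_INIT_ELM_16
        -> RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
        -> RTREE_CTX_INIT_ELM_4, ...            (doubling at each step)
        -> sixteen copies of RTREE_CTX_CACHE_ELM_INVALID,
           i.e. sixteen {RTREE_LEAFKEY_INVALID, NULL} initializers
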
-typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
-
-typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
-struct rtree_ctx_cache_elm_s {
- uintptr_t leafkey;
- rtree_leaf_elm_t *leaf;
-};
-
-typedef struct rtree_ctx_s rtree_ctx_t;
-struct rtree_ctx_s {
- /* Direct mapped cache. */
- rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
- /* L2 LRU cache. */
- rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
-};
-
-void rtree_ctx_data_init(rtree_ctx_t *ctx);
-
-#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/safety_check.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/safety_check.h
deleted file mode 100644
index f1a74f17..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/safety_check.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
-#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
-
-void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
- size_t true_size, size_t input_size);
-void safety_check_fail(const char *format, ...);
-
-typedef void (*safety_check_abort_hook_t)(const char *message);
-
-/* Can be set to NULL for the default behavior. */
-void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
-
-JEMALLOC_ALWAYS_INLINE void
-safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
- assert(usize < bumped_usize);
- for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
- *((unsigned char *)ptr + i) = 0xBC;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
-{
- for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
- if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
- safety_check_fail("Use after free error\n");
- }
- }
-}
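
A hypothetical standalone sketch of the same redzone pattern (the helper names and sizes below are invented, and real callers go through jemalloc's internal allocation paths rather than malloc): 0xBC is written into up to 32 bytes past the requested usable size at allocation time and checked at deallocation time, so any write into that slack is flagged.

    #include <stdio.h>
    #include <stdlib.h>

    static void set_redzone(unsigned char *ptr, size_t usize, size_t bumped_usize) {
        for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
            ptr[i] = 0xBC;
        }
    }

    static int redzone_intact(const unsigned char *ptr, size_t usize, size_t bumped_usize) {
        for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
            if (ptr[i] != 0xBC) {
                return 0;
            }
        }
        return 1;
    }

    int main(void) {
        size_t usize = 24, bumped_usize = 64; /* request bumped to a larger class */
        unsigned char *p = malloc(bumped_usize);
        if (p == NULL) {
            return 1;
        }
        set_redzone(p, usize, bumped_usize);
        p[usize + 3] = 0; /* simulated out-of-bounds write into the redzone */
        printf("redzone intact: %d\n", redzone_intact(p, usize, bumped_usize));
        free(p);
        return 0;
    }
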
-
-#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san.h
deleted file mode 100644
index 8813d6bb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san.h
+++ /dev/null
@@ -1,191 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_GUARD_H
-#define JEMALLOC_INTERNAL_GUARD_H
-
-#include "jemalloc/internal/ehooks.h"
-#include "jemalloc/internal/emap.h"
-
-#define SAN_PAGE_GUARD PAGE
-#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)
-
-#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
-#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0
-
-#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
-#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)
-
-static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;
-
-/* 0 means disabled, i.e. never guarded. */
-extern size_t opt_san_guard_large;
-extern size_t opt_san_guard_small;
-/* -1 means disabled, i.e. never check for use-after-free. */
-extern ssize_t opt_lg_san_uaf_align;
-
-void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- emap_t *emap, bool left, bool right, bool remap);
-void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- emap_t *emap, bool left, bool right);
-/*
- * Unguard the extent, but don't modify emap boundaries. Must be called on an
- * extent that has been erased from emap and shouldn't be placed back.
- */
-void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
- edata_t *edata, emap_t *emap);
-void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
-
-void tsd_san_init(tsd_t *tsd);
-void san_init(ssize_t lg_san_uaf_align);
-
-static inline void
-san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- emap_t *emap, bool remap) {
- san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
-}
-
-static inline void
-san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
- emap_t *emap) {
- san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
-}
-
-static inline size_t
-san_two_side_unguarded_sz(size_t size) {
- assert(size % PAGE == 0);
- assert(size >= SAN_PAGE_GUARDS_SIZE);
- return size - SAN_PAGE_GUARDS_SIZE;
-}
-
-static inline size_t
-san_two_side_guarded_sz(size_t size) {
- assert(size % PAGE == 0);
- return size + SAN_PAGE_GUARDS_SIZE;
-}
-
-static inline size_t
-san_one_side_unguarded_sz(size_t size) {
- assert(size % PAGE == 0);
- assert(size >= SAN_PAGE_GUARD);
- return size - SAN_PAGE_GUARD;
-}
-
-static inline size_t
-san_one_side_guarded_sz(size_t size) {
- assert(size % PAGE == 0);
- return size + SAN_PAGE_GUARD;
-}
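
For a sense of scale, assuming the usual 4 KiB page size: a two-side-guarded mapping for a 2 MiB extent occupies 2 MiB + 8 KiB, san_two_side_unguarded_sz() recovers the usable 2 MiB from it, and the one-sided helpers add or strip a single 4 KiB guard page.
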
-
-static inline bool
-san_guard_enabled(void) {
- return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
-}
-
-static inline bool
-san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
- size_t alignment) {
- if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
- tsdn_null(tsdn)) {
- return false;
- }
-
- tsd_t *tsd = tsdn_tsd(tsdn);
- uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
- assert(n >= 1);
- if (n > 1) {
- /*
- * Subtract conditionally because the guard may not happen due
- * to alignment or size restriction below.
- */
- *tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
- }
-
- if (n == 1 && (alignment <= PAGE) &&
- (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
- *tsd_san_extents_until_guard_largep_get(tsd) =
- opt_san_guard_large;
- return true;
- } else {
- assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
- return false;
- }
-}
-
-static inline bool
-san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
- if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
- tsdn_null(tsdn)) {
- return false;
- }
-
- tsd_t *tsd = tsdn_tsd(tsdn);
- uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
- assert(n >= 1);
- if (n == 1) {
- *tsd_san_extents_until_guard_smallp_get(tsd) =
- opt_san_guard_small;
- return true;
- } else {
- *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
- assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
- return false;
- }
-}
-
-static inline void
-san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
- void **last) {
- size_t ptr_sz = sizeof(void *);
-
- *first = ptr;
-
- *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
- assert(*first != *mid || usize == ptr_sz);
- assert((uintptr_t)*first <= (uintptr_t)*mid);
-
- /*
- * When usize > 32K, the gap between requested_size and usize might be
- * greater than 4K -- this means the last write may access a
- * likely-untouched page (default settings w/ 4K pages). However, by
- * default the tcache only goes up to the 32K size class, and is usually
- * tuned lower instead of higher, which makes it less of a concern.
- */
- *last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
- assert(*first != *last || usize == ptr_sz);
- assert(*mid != *last || usize <= ptr_sz * 2);
- assert((uintptr_t)*mid <= (uintptr_t)*last);
-}
-
-static inline bool
-san_junk_ptr_should_slow(void) {
- /*
- * The latter condition (pointer size greater than the min size class)
- * is not expected -- fall back to the slow path for simplicity.
- */
- return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
-}
-
-static inline void
-san_junk_ptr(void *ptr, size_t usize) {
- if (san_junk_ptr_should_slow()) {
- memset(ptr, (char)uaf_detect_junk, usize);
- return;
- }
-
- void *first, *mid, *last;
- san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
- *(uintptr_t *)first = uaf_detect_junk;
- *(uintptr_t *)mid = uaf_detect_junk;
- *(uintptr_t *)last = uaf_detect_junk;
-}
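
A small standalone sketch (assumed 64-bit pointers and an invented base address, not jemalloc code) of where the three-word fast path lands for a 4096-byte region; note that sizeof(uaf_detect_junk) equals the pointer size here, so the last word ends exactly at the end of the region.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const size_t ptr_sz = sizeof(void *);            /* 8 on the assumed target */
        uintptr_t ptr = (uintptr_t)0x7f0000010000ULL;    /* hypothetical base */
        size_t usize = 4096;
        uintptr_t first = ptr;
        uintptr_t mid = ptr + ((usize >> 1) & ~(ptr_sz - 1)); /* ptr + 2048 */
        uintptr_t last = ptr + usize - ptr_sz;                /* ptr + 4088 */
        printf("first=%#lx mid=%#lx last=%#lx\n",
            (unsigned long)first, (unsigned long)mid, (unsigned long)last);
        return 0;
    }
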
-
-static inline bool
-san_uaf_detection_enabled(void) {
- bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
- if (config_uaf_detection && ret) {
- assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
- opt_lg_san_uaf_align) - 1);
- }
-
- return ret;
-}
-
-#endif /* JEMALLOC_INTERNAL_GUARD_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san_bump.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san_bump.h
deleted file mode 100644
index 8ec4a710..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/san_bump.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
-#define JEMALLOC_INTERNAL_SAN_BUMP_H
-
-#include "jemalloc/internal/edata.h"
-#include "jemalloc/internal/exp_grow.h"
-#include "jemalloc/internal/mutex.h"
-
-#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
-
-extern bool opt_retain;
-
-typedef struct ehooks_s ehooks_t;
-typedef struct pac_s pac_t;
-
-typedef struct san_bump_alloc_s san_bump_alloc_t;
-struct san_bump_alloc_s {
- malloc_mutex_t mtx;
-
- edata_t *curr_reg;
-};
-
-static inline bool
-san_bump_enabled() {
- /*
- * We enable the san_bump allocator only when it's possible to break up a
- * mapping and unmap a part of it (maps_coalesce). This is needed to
- * ensure the arena destruction process can destroy all retained guarded
- * extents one by one and to unmap a trailing part of a retained guarded
- * region when it's too small to fit a pending allocation.
- * opt_retain is required, because this allocator retains a large
- * virtual memory mapping and returns smaller parts of it.
- */
- return maps_coalesce && opt_retain;
-}
-
-static inline bool
-san_bump_alloc_init(san_bump_alloc_t* sba) {
- bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
- WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
- if (err) {
- return true;
- }
- sba->curr_reg = NULL;
-
- return false;
-}
-
-edata_t *
-san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
- size_t size, bool zero);
-
-#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sc.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sc.h
deleted file mode 100644
index 9bab347b..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sc.h
+++ /dev/null
@@ -1,357 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SC_H
-#define JEMALLOC_INTERNAL_SC_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-
-/*
- * Size class computations:
- *
- * These are a little tricky; we'll first start by describing how things
- * generally work, and then describe some of the details.
- *
- * Ignore the first few size classes for a moment. We can then split all the
- * remaining size classes into groups. The size classes in a group are spaced
- * such that they cover allocation request sizes in a power-of-2 range. The
- * power of two is called the base of the group, and the size classes in it
- * satisfy allocations in the half-open range (base, base * 2]. There are
- * SC_NGROUP size classes in each group, equally spaced in the range, so that
- * each one covers allocations for base / SC_NGROUP possible allocation sizes.
- * We call that value (base / SC_NGROUP) the delta of the group. Each size class
- * is delta larger than the one before it (including the initial size class in a
- * group, which is delta larger than base, the largest size class in the
- * previous group).
- * To make the math all work out nicely, we require that SC_NGROUP is a power of
- * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
- * lg_base and lg_delta. For each of these groups then, we have that
- * lg_delta == lg_base - SC_LG_NGROUP.
- * The size classes in a group with a given lg_base and lg_delta (which, recall,
- * can be computed from lg_base for these groups) are therefore:
- * base + 1 * delta
- * which covers allocations in (base, base + 1 * delta]
- * base + 2 * delta
- * which covers allocations in (base + 1 * delta, base + 2 * delta].
- * base + 3 * delta
- * which covers allocations in (base + 2 * delta, base + 3 * delta].
- * ...
- * base + SC_NGROUP * delta ( == 2 * base)
- * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
- * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
- * practice.)
- * Note that the last size class in the group is the next power of two (after
- * base), so that we've set up the induction correctly for the next group's
- * selection of delta.
- *
- * Now, let's start considering the first few size classes. Two extra constants
- * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
- * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
- * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
- * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
- * highest required alignment of a platform. For allocation sizes smaller than
- * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
- * platforms with types with alignment larger than their size). To allow such
- * allocations (without wasting space unnecessarily), we introduce tiny size
- * classes; one per power of two, up until we hit the quantum size. There are
- * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
- *
- * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
- * start of a group in the sense we described above (covering a power of two
- * range) since, if we divided into it to pick a value of delta, we'd get a
- * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
- * is against the rules.
- *
- * The first base we can divide by SC_NGROUP while still being at least
- * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
- * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
- * classes are:
- * 1 * (1 << LG_QUANTUM)
- * 2 * (1 << LG_QUANTUM)
- * 3 * (1 << LG_QUANTUM)
- * ... (although, as above, this "..." is empty in practice)
- * SC_NGROUP * (1 << LG_QUANTUM).
- *
- * There are SC_NGROUP of these size classes, so we can regard it as a sort of
- * pseudo-group, even though it spans multiple powers of 2, is divided
- * differently, and both starts and ends on a power of 2 (as opposed to just
- * ending). SC_NGROUP is itself a power of two, so the first group after the
- * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
- * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
- * sizes without violating our LG_QUANTUM requirements, so we can safely set
- * lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
- *
- * So, in order, the size classes are:
- *
- * Tiny size classes:
- * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
- * - Sizes:
- * 1 << SC_LG_TINY_MIN
- * 1 << (SC_LG_TINY_MIN + 1)
- * 1 << (SC_LG_TINY_MIN + 2)
- * ...
- * 1 << (LG_QUANTUM - 1)
- *
- * Initial pseudo-group:
- * - Count: SC_NGROUP
- * - Sizes:
- * 1 * (1 << LG_QUANTUM)
- * 2 * (1 << LG_QUANTUM)
- * 3 * (1 << LG_QUANTUM)
- * ...
- * SC_NGROUP * (1 << LG_QUANTUM)
- *
- * Regular group 0:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- * Regular group 1:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- * ...
- *
- * Regular group N:
- * - Count: SC_NGROUP
- * - Sizes:
- * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
- * lg_base - SC_LG_NGROUP)
- * (1 << lg_base) + 1 * (1 << lg_delta)
- * (1 << lg_base) + 2 * (1 << lg_delta)
- * (1 << lg_base) + 3 * (1 << lg_delta)
- * ...
- * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
- *
- *
- * Representation of metadata:
- * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
- * lg_delta, and ndelta (i.e. number of deltas above the base) on a
- * per-size-class basis, and maintain the invariant that, across all size
- * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
- *
- * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
- * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
- *
- * For the initial tiny size classes (if any), lg_base is lg(size class size).
- * lg_delta is lg_base for the first size class, and lg_base - 1 for all
- * subsequent ones. ndelta is always 0.
- *
- * For the pseudo-group, if there are no tiny size classes, then we set
- * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
- * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
- * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
- * indeed get a power of two that way). If there *are* tiny size classes, then
- * the first size class needs to have lg_delta relative to the largest tiny size
- * class. We therefore set lg_base == LG_QUANTUM - 1,
- * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
- * pseudo-group the same.
- *
- *
- * Other terminology:
- * "Small" size classes mean those that are allocated out of bins, which is the
- * same as those that are slab allocated.
- * "Large" size classes are those that are not small. The cutoff for counting as
- * large is page size * group size.
- */
-
-/*
- * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
- */
-#define SC_LG_NGROUP 2
-#define SC_LG_TINY_MIN 3
-
-#if SC_LG_TINY_MIN == 0
-/* The div module doesn't support division by 1, which this would require. */
-#error "Unsupported LG_TINY_MIN"
-#endif
-
-/*
- * The definitions below are all determined by the above settings and system
- * characteristics.
- */
-#define SC_NGROUP (1ULL << SC_LG_NGROUP)
-#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
-#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
-#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
-#define SC_NPSEUDO SC_NGROUP
-#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
-/*
- * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
- * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
- * size class shorter than the others).
- * We could probably save some space in arenas by capping this at LG_VADDR size.
- */
-#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
-#define SC_NREGULAR (SC_NGROUP * \
- (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
-#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
-
-/*
- * The number of size classes that are a multiple of the page size.
- *
- * Here are the first few bases that have a page-sized SC.
- *
- * lg(base) | base | highest SC | page-multiple SCs
- * --------------|------------------------------------------
- * LG_PAGE - 1 | PAGE / 2 | PAGE | 1
- * LG_PAGE | PAGE | 2 * PAGE | 1
- * LG_PAGE + 1 | 2 * PAGE | 4 * PAGE | 2
- * LG_PAGE + 2 | 4 * PAGE | 8 * PAGE | 4
- *
- * The number of page-multiple SCs continues to grow in powers of two, up until
- * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
- * SC_LG_NGROUP. So, then, the number of size classes that are multiples of the
- * page size whose lg_delta is less than the page size is
- * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
- *
- * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
- * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
- * NGROUP - 1.
- *
- * This gives us the quantity we seek.
- */
-#define SC_NPSIZES ( \
- SC_NGROUP \
- + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
- + SC_NGROUP - 1)
-
-/*
- * We declare a size class is binnable if size < page size * group. Or, in other
- * words, lg(size) < lg(page size) + lg(group size).
- */
-#define SC_NBINS ( \
- /* Sub-regular size classes. */ \
- SC_NTINY + SC_NPSEUDO \
- /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
- + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
- /* Last SC of the last group hits the bound exactly; exclude it. */ \
- - 1)
-
-/*
- * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes.
- */
-#if (SC_NBINS > 256)
-# error "Too many small size classes"
-#endif
-
-/* The largest size class in the lookup table, and its binary log. */
-#define SC_LG_MAX_LOOKUP 12
-#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)
-
-/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
-#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
-#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
-
-/* The largest size class allocated out of a slab. */
-#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
- + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
-
-/* The fastpath assumes all lookup-able sizes are small. */
-#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
-# error "Lookup table sizes must be small"
-#endif
-
-/* The smallest size class not allocated out of a slab. */
-#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
-#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
-
-/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
-#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
-#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
-
-/* The largest size class supported. */
-#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
-
-/* Maximum number of regions in one slab. */
-#ifndef CONFIG_LG_SLAB_MAXREGS
-# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
-#else
-# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
-# error "Unsupported SC_LG_SLAB_MAXREGS"
-# else
-# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
-# endif
-#endif
-
-#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)
-
-typedef struct sc_s sc_t;
-struct sc_s {
- /* Size class index, or -1 if not a valid size class. */
- int index;
- /* Lg group base size (no deltas added). */
- int lg_base;
- /* Lg delta to previous size class. */
- int lg_delta;
-	/* Delta multiplier. size == (1 << lg_base) + ndelta * (1 << lg_delta) */
- int ndelta;
- /*
- * True if the size class is a multiple of the page size, false
- * otherwise.
- */
- bool psz;
- /*
-	 * True if the size class is a small (bin) size class, false otherwise.
- */
- bool bin;
- /* The slab page count if a small bin size class, 0 otherwise. */
- int pgs;
- /* Same as lg_delta if a lookup table size class, 0 otherwise. */
- int lg_delta_lookup;
-};
-
-typedef struct sc_data_s sc_data_t;
-struct sc_data_s {
- /* Number of tiny size classes. */
- unsigned ntiny;
- /* Number of bins supported by the lookup table. */
- int nlbins;
- /* Number of small size class bins. */
- int nbins;
- /* Number of size classes. */
- int nsizes;
- /* Number of bits required to store NSIZES. */
- int lg_ceil_nsizes;
- /* Number of size classes that are a multiple of (1U << LG_PAGE). */
- unsigned npsizes;
- /* Lg of maximum tiny size class (or -1, if none). */
- int lg_tiny_maxclass;
- /* Maximum size class included in lookup table. */
- size_t lookup_maxclass;
- /* Maximum small size class. */
- size_t small_maxclass;
- /* Lg of minimum large size class. */
- int lg_large_minclass;
- /* The minimum large size class. */
- size_t large_minclass;
- /* Maximum (large) size class. */
- size_t large_maxclass;
- /* True if the sc_data_t has been initialized (for debugging only). */
- bool initialized;
-
- sc_t sc[SC_NSIZES];
-};
-
-size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
-void sc_data_init(sc_data_t *data);
-/*
- * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
- * Otherwise, does its best to accommodate the request.
- */
-void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
- int pgs);
-void sc_boot(sc_data_t *data);
-
-#endif /* JEMALLOC_INTERNAL_SC_H */
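The grouping scheme documented at the top of this header is easiest to see with concrete numbers. The sketch below is not part of jemalloc; it just evaluates the stated invariant size == (1 << lg_base) + ndelta * (1 << lg_delta), with EX_LG_QUANTUM == 4 chosen purely for illustration (the real value is platform-dependent).

#include <stdio.h>
#include <stddef.h>

/* Illustrative constants; EX_LG_QUANTUM is an assumption for this sketch. */
#define EX_LG_QUANTUM   4                       /* 16-byte quantum */
#define EX_SC_LG_NGROUP 2                       /* matches SC_LG_NGROUP above */
#define EX_SC_NGROUP    (1 << EX_SC_LG_NGROUP)

/* Same invariant as above: size == (1 << lg_base) + ndelta * (1 << lg_delta). */
static size_t
ex_reg_size(int lg_base, int lg_delta, int ndelta) {
        return ((size_t)1 << lg_base) + (size_t)ndelta * ((size_t)1 << lg_delta);
}

int
main(void) {
        /* Pseudo-group: SC_NGROUP classes spaced one quantum apart. */
        for (int ndelta = 1; ndelta <= EX_SC_NGROUP; ndelta++) {
                printf("pseudo:  %zu\n", (size_t)ndelta << EX_LG_QUANTUM);        /* 16 32 48 64 */
        }
        /* Regular group 0: lg_base = LG_QUANTUM + SC_LG_NGROUP, lg_delta = lg_base - SC_LG_NGROUP. */
        int lg_base = EX_LG_QUANTUM + EX_SC_LG_NGROUP;
        int lg_delta = lg_base - EX_SC_LG_NGROUP;
        for (int ndelta = 1; ndelta <= EX_SC_NGROUP; ndelta++) {
                printf("regular: %zu\n", ex_reg_size(lg_base, lg_delta, ndelta)); /* 80 96 112 128 */
        }
        return 0;
}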
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec.h
deleted file mode 100644
index fa863382..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec.h
+++ /dev/null
@@ -1,120 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SEC_H
-#define JEMALLOC_INTERNAL_SEC_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/pai.h"
-
-/*
- * Small extent cache.
- *
- * This includes some utilities to cache small extents. We have a per-pszind
- * bin with its own list of extents of that size. We don't try to do any
- * coalescing of extents (since it would in general require cross-shard locks or
- * knowledge of the underlying PAI implementation).
- */
-
-/*
- * For now, this is just one field; eventually, we'll probably want to get more
- * fine-grained data out (like per-size class statistics).
- */
-typedef struct sec_stats_s sec_stats_t;
-struct sec_stats_s {
- /* Sum of bytes_cur across all shards. */
- size_t bytes;
-};
-
-static inline void
-sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
- dst->bytes += src->bytes;
-}
-
-/* A collection of free extents, all of the same size. */
-typedef struct sec_bin_s sec_bin_t;
-struct sec_bin_s {
- /*
- * When we fail to fulfill an allocation, we do a batch-alloc on the
- * underlying allocator to fill extra items, as well. We drop the SEC
- * lock while doing so, to allow operations on other bins to succeed.
- * That introduces the possibility of other threads also trying to
- * allocate out of this bin, failing, and also going to the backing
- * allocator. To avoid a thundering herd problem in which lots of
- * threads do batch allocs and overfill this bin as a result, we only
- * allow one batch allocation at a time for a bin. This bool tracks
- * whether or not some thread is already batch allocating.
- *
- * Eventually, the right answer may be a smarter sharding policy for the
- * bins (e.g. a mutex per bin, which would also be more scalable
- * generally; the batch-allocating thread could hold it while
- * batch-allocating).
- */
- bool being_batch_filled;
-
- /*
- * Number of bytes in this particular bin (as opposed to the
-	 * sec_shard_t's bytes_cur). This isn't user-visible or reported in
- * stats; rather, it allows us to quickly determine the change in the
- * centralized counter when flushing.
- */
- size_t bytes_cur;
- edata_list_active_t freelist;
-};
-
-typedef struct sec_shard_s sec_shard_t;
-struct sec_shard_s {
- /*
- * We don't keep per-bin mutexes, even though that would allow more
- * sharding; this allows global cache-eviction, which in turn allows for
- * better balancing across free lists.
- */
- malloc_mutex_t mtx;
- /*
- * A SEC may need to be shut down (i.e. flushed of its contents and
- * prevented from further caching). To avoid tricky synchronization
- * issues, we just track enabled-status in each shard, guarded by a
- * mutex. In practice, this is only ever checked during brief races,
- * since the arena-level atomic boolean tracking HPA enabled-ness means
- * that we won't go down these pathways very often after custom extent
- * hooks are installed.
- */
- bool enabled;
- sec_bin_t *bins;
- /* Number of bytes in all bins in the shard. */
- size_t bytes_cur;
- /* The next pszind to flush in the flush-some pathways. */
- pszind_t to_flush_next;
-};
-
-typedef struct sec_s sec_t;
-struct sec_s {
- pai_t pai;
- pai_t *fallback;
-
- sec_opts_t opts;
- sec_shard_t *shards;
- pszind_t npsizes;
-};
-
-bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
- const sec_opts_t *opts);
-void sec_flush(tsdn_t *tsdn, sec_t *sec);
-void sec_disable(tsdn_t *tsdn, sec_t *sec);
-
-/*
- * Morally, these two stats methods probably ought to be a single one (and the
- * mutex_prof_data ought to live in the sec_stats_t). But splitting them apart
- * lets them fit easily into the pa_shard stats framework (which also has this
- * split), which simplifies the stats management.
- */
-void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
-void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
- mutex_prof_data_t *mutex_prof_data);
-
-/*
- * We use the arena lock ordering; these are acquired in phase 2 of forking, but
- * should be acquired before the underlying allocator mutexes.
- */
-void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
-void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
-void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
-
-#endif /* JEMALLOC_INTERNAL_SEC_H */
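The being_batch_filled flag above encodes a one-refiller-at-a-time policy: the thread that claims the refill drops the shard lock while it talks to the backing allocator, and any thread that arrives in the meantime goes straight to the fallback for a single object. A standalone sketch of that policy, using toy types, a plain pthread mutex, and malloc-backed stand-ins for the fallback PAI; none of this is the actual SEC code.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct {
        void *items[8];
        size_t n;
        bool being_batch_filled;
} toy_bin_t;

static pthread_mutex_t toy_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the backing allocator (hypothetical, for illustration only). */
static void *fallback_alloc(void) { return malloc(64); }
static size_t fallback_alloc_batch(void **out, size_t want) {
        size_t i;
        for (i = 0; i < want; i++) {
                out[i] = malloc(64);
                if (out[i] == NULL) break;
        }
        return i;
}

static void *
toy_bin_alloc(toy_bin_t *bin) {
        pthread_mutex_lock(&toy_mtx);
        if (bin->n > 0) {                       /* fast path: serve from the cache */
                void *ret = bin->items[--bin->n];
                pthread_mutex_unlock(&toy_mtx);
                return ret;
        }
        if (bin->being_batch_filled) {          /* another thread is already refilling */
                pthread_mutex_unlock(&toy_mtx);
                return fallback_alloc();        /* avoid the thundering herd */
        }
        bin->being_batch_filled = true;         /* claim the refill */
        pthread_mutex_unlock(&toy_mtx);         /* drop the lock around the backing call */

        void *batch[4];
        size_t got = fallback_alloc_batch(batch, 4);

        pthread_mutex_lock(&toy_mtx);
        bin->being_batch_filled = false;
        void *ret = (got > 0) ? batch[--got] : NULL;
        while (got > 0) {                       /* stash the extras for later requests */
                bin->items[bin->n++] = batch[--got];
        }
        pthread_mutex_unlock(&toy_mtx);
        return ret;
}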
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec_opts.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec_opts.h
deleted file mode 100644
index a3ad72fb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sec_opts.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H
-#define JEMALLOC_INTERNAL_SEC_OPTS_H
-
-/*
- * The configuration settings used by an sec_t. Morally, this is part of the
- * SEC interface, but we put it here for header-ordering reasons.
- */
-
-typedef struct sec_opts_s sec_opts_t;
-struct sec_opts_s {
- /*
- * We don't necessarily always use all the shards; requests are
- * distributed across shards [0, nshards - 1).
- */
- size_t nshards;
- /*
- * We'll automatically refuse to cache any objects in this sec if
- * they're larger than max_alloc bytes, instead forwarding such objects
- * directly to the fallback.
- */
- size_t max_alloc;
- /*
- * Exceeding this amount of cached extents in a shard causes us to start
- * flushing bins in that shard until we fall below bytes_after_flush.
- */
- size_t max_bytes;
- /*
- * The number of bytes (in all bins) we flush down to when we exceed
-	 * max_bytes. We want this to be less than max_bytes, because
- * otherwise we could get into situations where a shard undergoing
- * net-deallocation keeps bytes_cur very near to max_bytes, so that
- * most deallocations get immediately forwarded to the underlying PAI
- * implementation, defeating the point of the SEC.
- */
- size_t bytes_after_flush;
- /*
-	 * When we can't satisfy an allocation out of the SEC because there are
-	 * no cached extents available, we allocate several extents of that size
-	 * out of the fallback allocator. Eventually we might want to do something
- * cleverer, but for now we just grab a fixed number.
- */
- size_t batch_fill_extra;
-};
-
-#define SEC_OPTS_DEFAULT { \
- /* nshards */ \
- 4, \
- /* max_alloc */ \
- (32 * 1024) < PAGE ? PAGE : (32 * 1024), \
- /* max_bytes */ \
- 256 * 1024, \
- /* bytes_after_flush */ \
- 128 * 1024, \
- /* batch_fill_extra */ \
- 0 \
-}
-
-
-#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */
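SEC_OPTS_DEFAULT is a plain positional initializer, so typical use is to start from it and override individual fields before handing the struct to sec_init() (declared in sec.h above). A sketch, assuming the caller already holds the tsdn, base and fallback handles that sec_init() needs:

sec_t sec;
sec_opts_t opts = SEC_OPTS_DEFAULT;
opts.nshards = 8;                    /* example override */
opts.max_bytes = 1024 * 1024;        /* allow up to 1 MiB of cached extents per shard */
opts.bytes_after_flush = 512 * 1024; /* flush down to half of that */
if (sec_init(tsdn, &sec, base, fallback, &opts)) {
        /* jemalloc convention: true means failure; fall back to using the PAI directly. */
}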
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/seq.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/seq.h
deleted file mode 100644
index ef2df4c6..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/seq.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SEQ_H
-#define JEMALLOC_INTERNAL_SEQ_H
-
-#include "jemalloc/internal/atomic.h"
-
-/*
- * A simple seqlock implementation.
- */
-
-#define seq_define(type, short_type) \
-typedef struct { \
- atomic_zu_t seq; \
- atomic_zu_t data[ \
- (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)]; \
-} seq_##short_type##_t; \
- \
-/* \
- * No internal synchronization -- the caller must ensure that there's \
- * only a single writer at a time. \
- */ \
-static inline void \
-seq_store_##short_type(seq_##short_type##_t *dst, type *src) { \
- size_t buf[sizeof(dst->data) / sizeof(size_t)]; \
- buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \
- memcpy(buf, src, sizeof(type)); \
- size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \
- atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \
- atomic_fence(ATOMIC_RELEASE); \
- for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
- atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \
- } \
- atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \
-} \
- \
-/* Returns whether or not the read was consistent. */ \
-static inline bool \
-seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \
- size_t buf[sizeof(src->data) / sizeof(size_t)]; \
- size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \
- if (seq1 % 2 != 0) { \
- return false; \
- } \
- for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \
- buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \
- } \
- atomic_fence(ATOMIC_ACQUIRE); \
- size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \
- if (seq1 != seq2) { \
- return false; \
- } \
- memcpy(dst, buf, sizeof(type)); \
- return true; \
-}
-
-#endif /* JEMALLOC_INTERNAL_SEQ_H */
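A minimal usage sketch of the macro. The counters_t payload and the names derived from it are made up for illustration; the sketch assumes the jemalloc-internal atomic.h environment that this header already requires.

#include <stdint.h>
#include <string.h>

typedef struct {
        uint64_t hits;
        uint64_t misses;
} counters_t;

/* Expands to seq_counters_t, seq_store_counters() and seq_try_load_counters(). */
seq_define(counters_t, counters)

static seq_counters_t shared;   /* zero-initialized: seq == 0, i.e. consistent */

/* Single writer, per the "no internal synchronization" note above. */
static void
publish(uint64_t hits, uint64_t misses) {
        counters_t tmp = {hits, misses};
        seq_store_counters(&shared, &tmp);
}

/* Readers retry until they observe an even, unchanged sequence number. */
static counters_t
snapshot(void) {
        counters_t out;
        while (!seq_try_load_counters(&out, &shared)) {
                /* writer in flight or torn read; retry */
        }
        return out;
}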
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/slab_data.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/slab_data.h
deleted file mode 100644
index e821863d..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/slab_data.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
-#define JEMALLOC_INTERNAL_SLAB_DATA_H
-
-#include "jemalloc/internal/bitmap.h"
-
-typedef struct slab_data_s slab_data_t;
-struct slab_data_s {
- /* Per region allocated/deallocated bitmap. */
- bitmap_t bitmap[BITMAP_GROUPS_MAX];
-};
-
-#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.h
deleted file mode 100644
index 2e14430f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.h
+++ /dev/null
@@ -1,232 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
-
-/*
- * This file was generated by the following command:
- * sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
- /* STEP(step, h, x, y) */ \
- STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
- STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
- STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
- STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
- STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
- STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
- STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
- STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
- STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
- STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
- STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
- STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
- STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
- STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
- STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
- STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
- STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
- STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
- STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
- STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
- STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
- STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
- STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
- STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
- STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
- STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
- STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
- STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
- STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
- STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
- STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
- STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
- STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
- STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
- STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
- STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
- STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
- STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
- STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
- STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
- STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
- STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
- STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
- STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
- STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
- STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
- STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
- STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
- STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
- STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
- STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
- STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
- STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
- STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
- STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
- STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
- STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
- STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
- STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
- STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
- STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
- STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
- STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
- STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
- STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
- STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
- STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
- STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
- STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
- STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
- STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
- STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
- STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
- STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
- STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
- STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
- STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
- STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
- STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
- STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
- STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
- STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
- STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
- STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
- STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
- STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
- STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
- STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
- STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
- STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
- STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
- STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
- STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
- STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
- STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
- STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
- STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
- STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
- STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
- STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
- STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
- STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
- STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
- STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
- STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
- STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
- STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
- STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
- STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
- STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
- STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
- STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
- STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
- STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
- STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
- STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
- STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
- STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
- STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
- STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
- STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
- STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
- STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
- STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
- STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
- STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
- STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
- STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
- STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
- STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
- STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
- STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
- STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
- STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
- STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
- STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
- STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
- STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
- STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
- STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
- STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
- STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
- STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
- STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
- STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
- STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
- STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
- STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
- STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
- STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
- STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
- STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
- STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
- STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
- STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
- STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
- STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
- STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
- STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
- STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
- STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
- STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
- STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
- STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
- STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
- STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
- STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
- STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
- STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
- STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
- STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
- STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
- STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
- STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
- STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
- STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
- STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
- STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
- STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
- STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
- STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
- STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
- STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
- STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
- STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
- STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
- STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
- STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
- STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
- STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
- STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
- STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
- STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
- STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
- STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
- STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
- STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
- STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
- STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
- STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-
-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
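The table is an X-macro: defining STEP before expanding SMOOTHSTEP selects which columns survive. One common way to consume such a table (the array name below is made up for illustration) is to keep only the fixed-point h column:

#include <stdint.h>

static const uint64_t smoothstep_tab[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)     h,
        SMOOTHSTEP
#undef STEP
};

/*
 * smoothstep_tab[i] / 2^SMOOTHSTEP_BFP approximates smootherstep((i + 1) / SMOOTHSTEP_NSTEPS);
 * e.g. smoothstep_tab[99] == 0x800000, i.e. exactly 0.5 at x == 0.5.
 */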
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.sh
deleted file mode 100755
index 65de97bf..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/smoothstep.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/sh
-#
-# Generate a discrete lookup table for a sigmoid function in the smoothstep
-# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
-# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
-# the entries using a binary fixed point representation.
-#
-# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
-#
-# <variant> is in {smooth, smoother, smoothest}.
-# <nsteps> must be greater than zero.
-# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
-# <xprec> is x decimal precision.
-# <yprec> is y decimal precision.
-
-#set -x
-
-cmd="sh smoothstep.sh $*"
-variant=$1
-nsteps=$2
-bfp=$3
-xprec=$4
-yprec=$5
-
-case "${variant}" in
- smooth)
- ;;
- smoother)
- ;;
- smoothest)
- ;;
- *)
- echo "Unsupported variant"
- exit 1
- ;;
-esac
-
-smooth() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoother() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoothest() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-cat <<EOF
-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
-
-/*
- * This file was generated by the following command:
- * $cmd
- */
-/******************************************************************************/
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "${variant}"
-#define SMOOTHSTEP_NSTEPS ${nsteps}
-#define SMOOTHSTEP_BFP ${bfp}
-#define SMOOTHSTEP \\
- /* STEP(step, h, x, y) */ \\
-EOF
-
-s=1
-while [ $s -le $nsteps ] ; do
- $variant ${s}
- x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
-
- s=$((s+1))
-done
-echo
-
-cat <<EOF
-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
-EOF
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/spin.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/spin.h
deleted file mode 100644
index 22804c68..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/spin.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SPIN_H
-#define JEMALLOC_INTERNAL_SPIN_H
-
-#define SPIN_INITIALIZER {0U}
-
-typedef struct {
- unsigned iteration;
-} spin_t;
-
-static inline void
-spin_cpu_spinwait() {
-# if HAVE_CPU_SPINWAIT
- CPU_SPINWAIT;
-# else
- volatile int x = 0;
- x = x;
-# endif
-}
-
-static inline void
-spin_adaptive(spin_t *spin) {
- volatile uint32_t i;
-
- if (spin->iteration < 5) {
- for (i = 0; i < (1U << spin->iteration); i++) {
- spin_cpu_spinwait();
- }
- spin->iteration++;
- } else {
-#ifdef _WIN32
- SwitchToThread();
-#else
- sched_yield();
-#endif
- }
-}
-
-#undef SPIN_INLINE
-
-#endif /* JEMALLOC_INTERNAL_SPIN_H */
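Typical use is a busy-wait loop around some condition, letting spin_adaptive() escalate from 1, 2, 4, 8, 16 pause iterations to yielding the CPU. A sketch against a hypothetical C11 test-and-set flag (not a lock jemalloc itself uses here):

#include <stdatomic.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;

static void
busy_wait_acquire(void) {
        spin_t spin = SPIN_INITIALIZER;
        while (atomic_flag_test_and_set_explicit(&busy, memory_order_acquire)) {
                spin_adaptive(&spin);   /* exponential backoff, then a scheduler yield */
        }
}

static void
busy_wait_release(void) {
        atomic_flag_clear_explicit(&busy, memory_order_release);
}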
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/stats.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/stats.h
deleted file mode 100644
index 727f7dcb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/stats.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_STATS_H
-#define JEMALLOC_INTERNAL_STATS_H
-
-/* OPTION(opt, var_name, default, set_value_to) */
-#define STATS_PRINT_OPTIONS \
- OPTION('J', json, false, true) \
- OPTION('g', general, true, false) \
- OPTION('m', merged, config_stats, false) \
- OPTION('d', destroyed, config_stats, false) \
- OPTION('a', unmerged, config_stats, false) \
- OPTION('b', bins, true, false) \
- OPTION('l', large, true, false) \
- OPTION('x', mutex, true, false) \
- OPTION('e', extents, true, false) \
- OPTION('h', hpa, config_stats, false)
-
-enum {
-#define OPTION(o, v, d, s) stats_print_option_num_##v,
- STATS_PRINT_OPTIONS
-#undef OPTION
- stats_print_tot_num_options
-};
-
-/* Options for stats_print. */
-extern bool opt_stats_print;
-extern char opt_stats_print_opts[stats_print_tot_num_options+1];
-
-/* Utilities for stats_interval. */
-extern int64_t opt_stats_interval;
-extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
-
-#define STATS_INTERVAL_DEFAULT -1
-/*
- * Batch-increment the counter to reduce synchronization overhead. Each thread
- * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also limit the
- * BATCH_MAX for accuracy when the interval is huge (which is expected).
- */
-#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
-#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
-
-/* Only accessed by thread event. */
-uint64_t stats_interval_new_event_wait(tsd_t *tsd);
-uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
-void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
-
-/* Implements je_malloc_stats_print. */
-void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
-
-bool stats_boot(void);
-void stats_prefork(tsdn_t *tsdn);
-void stats_postfork_parent(tsdn_t *tsdn);
-void stats_postfork_child(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_STATS_H */
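Per the OPTION(opt, var_name, default, set_value_to) legend, each row means: var_name defaults to default, and is set to set_value_to when the character opt appears in the opts string handed to stats_print(). For reference, the enum above expands (written out by hand) to:

enum {
        stats_print_option_num_json,            /* 'J' */
        stats_print_option_num_general,         /* 'g' */
        stats_print_option_num_merged,          /* 'm' */
        stats_print_option_num_destroyed,       /* 'd' */
        stats_print_option_num_unmerged,        /* 'a' */
        stats_print_option_num_bins,            /* 'b' */
        stats_print_option_num_large,           /* 'l' */
        stats_print_option_num_mutex,           /* 'x' */
        stats_print_option_num_extents,         /* 'e' */
        stats_print_option_num_hpa,             /* 'h' */
        stats_print_tot_num_options             /* == 10 */
};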
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sz.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sz.h
deleted file mode 100644
index 3c0fc1da..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/sz.h
+++ /dev/null
@@ -1,371 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_SIZE_H
-#define JEMALLOC_INTERNAL_SIZE_H
-
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/util.h"
-
-/*
- * sz module: Size computations.
- *
- * Some abbreviations used here:
- * p: Page
- * ind: Index
- * s, sz: Size
- * u: Usable size
- * a: Aligned
- *
- * These are not always used completely consistently, but should be enough to
- * interpret function names. E.g. sz_psz2ind converts page size to page size
- * index; sz_sa2u converts a (size, alignment) allocation request to the usable
- * size that would result from such an allocation.
- */
-
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
-/*
- * sz_pind2sz_tab encodes the same information as could be computed by
- * sz_pind2sz_compute().
- */
-extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
-/*
- * sz_index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by sz_index2size_compute().
- */
-extern size_t sz_index2size_tab[SC_NSIZES];
-/*
- * sz_size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via sz_size2index().
- */
-extern uint8_t sz_size2index_tab[];
-
-/*
- * Padding for large allocations: PAGE when opt_cache_oblivious == true (to
- * enable cache index randomization); 0 otherwise.
- */
-extern size_t sz_large_pad;
-
-extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
-
-JEMALLOC_ALWAYS_INLINE pszind_t
-sz_psz2ind(size_t psz) {
- assert(psz > 0);
- if (unlikely(psz > SC_LARGE_MAXCLASS)) {
- return SC_NPSIZES;
- }
- /* x is the lg of the first base >= psz. */
- pszind_t x = lg_ceil(psz);
- /*
-	 * sc.h introduces a lot of size classes, divided into size class
-	 * groups. One group is special: every size class in it or after it is
-	 * an integer multiple of PAGE. We call it first_ps_rg, short for
-	 * "first page-size regular group". Its range is (base, base * 2],
-	 * where base == PAGE * SC_NGROUP. off_to_first_ps_rg begins from 1
-	 * rather than 0; e.g. off_to_first_ps_rg is 1 when psz is
-	 * (PAGE * SC_NGROUP + 1).
- */
- pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
- 0 : x - (SC_LG_NGROUP + LG_PAGE);
-
- /*
- * Same as sc_s::lg_delta.
- * Delta for off_to_first_ps_rg == 1 is PAGE,
- * for each increase in offset, it's multiplied by two.
- * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
- */
- pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
- LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
-
- /*
- * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
- * The leftmost bits whose len is lg_base decide the base of psz.
-	 * The rightmost bits whose len is lg_delta decide (psz % (1 << lg_delta)).
- * The middle bits whose len is SC_LG_NGROUP decide ndelta.
-	 * ndelta is the position of the size class within its group; it
-	 * starts from 1.
- * If you don't know lg_base, ndelta or lg_delta, see sc.h.
- * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
- * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
- * |<-- ndelta -->|
- * rg_inner_off = ndelta - 1
-	 * Why use (psz - 1)?
-	 * To handle the case where psz % (1 << lg_delta) == 0.
- */
- pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);
-
- pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
- pszind_t ind = base_ind + rg_inner_off;
- return ind;
-}
-
-static inline size_t
-sz_pind2sz_compute(pszind_t pind) {
- if (unlikely(pind == SC_NPSIZES)) {
- return SC_LARGE_MAXCLASS + PAGE;
- }
- size_t grp = pind >> SC_LG_NGROUP;
- size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
- & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return sz;
-}
-
-static inline size_t
-sz_pind2sz_lookup(pszind_t pind) {
- size_t ret = (size_t)sz_pind2sz_tab[pind];
- assert(ret == sz_pind2sz_compute(pind));
- return ret;
-}
-
-static inline size_t
-sz_pind2sz(pszind_t pind) {
- assert(pind < SC_NPSIZES + 1);
- return sz_pind2sz_lookup(pind);
-}
-
-static inline size_t
-sz_psz2u(size_t psz) {
- if (unlikely(psz > SC_LARGE_MAXCLASS)) {
- return SC_LARGE_MAXCLASS + PAGE;
- }
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
- LG_PAGE : x - SC_LG_NGROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return usize;
-}
-
-static inline szind_t
-sz_size2index_compute(size_t size) {
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- return SC_NSIZES;
- }
-
- if (size == 0) {
- return 0;
- }
-#if (SC_NTINY != 0)
- if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
- }
-#endif
- {
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
- x - (SC_LG_NGROUP + LG_QUANTUM);
- szind_t grp = shift << SC_LG_NGROUP;
-
- szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
-
- size_t delta_inverse_mask = ZU(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << SC_LG_NGROUP) - 1);
-
- szind_t index = SC_NTINY + grp + mod;
- return index;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup_impl(size_t size) {
- assert(size <= SC_LOOKUP_MAXCLASS);
- return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
- >> SC_LG_TINY_MIN];
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup(size_t size) {
- szind_t ret = sz_size2index_lookup_impl(size);
- assert(ret == sz_size2index_compute(size));
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index(size_t size) {
- if (likely(size <= SC_LOOKUP_MAXCLASS)) {
- return sz_size2index_lookup(size);
- }
- return sz_size2index_compute(size);
-}
-
-static inline size_t
-sz_index2size_compute(szind_t index) {
-#if (SC_NTINY > 0)
- if (index < SC_NTINY) {
- return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
- }
-#endif
- {
- size_t reduced_index = index - SC_NTINY;
- size_t grp = reduced_index >> SC_LG_NGROUP;
- size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
- 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_QUANTUM +
- (SC_LG_NGROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_QUANTUM-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t usize = grp_size + mod_size;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_index2size_lookup_impl(szind_t index) {
- return sz_index2size_tab[index];
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_index2size_lookup(szind_t index) {
- size_t ret = sz_index2size_lookup_impl(index);
- assert(ret == sz_index2size_compute(index));
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_index2size(szind_t index) {
- assert(index < SC_NSIZES);
- return sz_index2size_lookup(index);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
- *ind = sz_size2index_lookup_impl(size);
- *usize = sz_index2size_lookup_impl(*ind);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u_compute(size_t size) {
- if (unlikely(size > SC_LARGE_MAXCLASS)) {
- return 0;
- }
-
- if (size == 0) {
- size++;
- }
-#if (SC_NTINY > 0)
- if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
- size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
- return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
- (ZU(1) << lg_ceil));
- }
-#endif
- {
- size_t x = lg_floor((size<<1)-1);
- size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
- ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (size + delta_mask) & ~delta_mask;
- return usize;
- }
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u_lookup(size_t size) {
- size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
-
- assert(ret == sz_s2u_compute(size));
- return ret;
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sz_s2u(size_t size) {
- if (likely(size <= SC_LOOKUP_MAXCLASS)) {
- return sz_s2u_lookup(size);
- }
- return sz_s2u_compute(size);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sz_sa2u(size_t size, size_t alignment) {
- size_t usize;
-
- assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
- /* Try for a small size class. */
- if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each
- * small size class, every object is aligned at the smallest
- * power of two that is non-zero in the base two representation
- * of the size. For example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
- usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
- if (usize < SC_LARGE_MINCLASS) {
- return usize;
- }
- }
-
- /* Large size class. Beware of overflow. */
-
- if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
- return 0;
- }
-
- /* Make sure result is a large size class. */
- if (size <= SC_LARGE_MINCLASS) {
- usize = SC_LARGE_MINCLASS;
- } else {
- usize = sz_s2u(size);
- if (usize < size) {
- /* size_t overflow. */
- return 0;
- }
- }
-
- /*
- * Calculate the multi-page mapping that large_palloc() would need in
- * order to guarantee the alignment.
- */
- if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
- /* size_t overflow. */
- return 0;
- }
- return usize;
-}
-
-size_t sz_psz_quantize_floor(size_t size);
-size_t sz_psz_quantize_ceil(size_t size);
-
-#endif /* JEMALLOC_INTERNAL_SIZE_H */
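The rounding performed by sz_s2u_compute() for sizes at or above the quantum can be illustrated with a self-contained sketch. EX_LG_QUANTUM and EX_LG_NGROUP are assumptions picked for the example; the real code uses lg_floor from bit_util.h and the SC_* constants.

#include <stddef.h>
#include <stdio.h>

/* Illustrative constants (assumptions for this sketch, not the build's values). */
#define EX_LG_QUANTUM   4       /* 16-byte quantum */
#define EX_LG_NGROUP    2       /* 4 classes per group */

/* lg_floor for nonzero x; stand-in for the bit_util.h helper. */
static size_t
ex_lg_floor(size_t x) {
        size_t lg = 0;
        while (x >>= 1) {
                lg++;
        }
        return lg;
}

/*
 * Same rounding rule as sz_s2u_compute() for non-tiny sizes: find the group's
 * lg_delta from lg_floor((size << 1) - 1), then round size up to a multiple of
 * that delta.
 */
static size_t
ex_round_up_to_class(size_t size) {
        size_t x = ex_lg_floor((size << 1) - 1);
        size_t lg_delta = (x < EX_LG_NGROUP + EX_LG_QUANTUM + 1)
            ? EX_LG_QUANTUM : x - EX_LG_NGROUP - 1;
        size_t delta_mask = ((size_t)1 << lg_delta) - 1;
        return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
        /* With the constants above: 17 -> 32, 65 -> 80, 100 -> 112, 129 -> 160. */
        printf("%zu %zu %zu %zu\n", ex_round_up_to_class(17),
            ex_round_up_to_class(65), ex_round_up_to_class(100),
            ex_round_up_to_class(129));
        return 0;
}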
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_externs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_externs.h
deleted file mode 100644
index a2ab7101..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_externs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-
-extern bool opt_tcache;
-extern size_t opt_tcache_max;
-extern ssize_t opt_lg_tcache_nslots_mul;
-extern unsigned opt_tcache_nslots_small_min;
-extern unsigned opt_tcache_nslots_small_max;
-extern unsigned opt_tcache_nslots_large;
-extern ssize_t opt_lg_tcache_shift;
-extern size_t opt_tcache_gc_incr_bytes;
-extern size_t opt_tcache_gc_delay_bytes;
-extern unsigned opt_lg_tcache_flush_small_div;
-extern unsigned opt_lg_tcache_flush_large_div;
-
-/*
- * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
- * large-object bins.
- */
-extern unsigned nhbins;
-
-/* Maximum cached size class. */
-extern size_t tcache_maxclass;
-
-extern cache_bin_info_t *tcache_bin_info;
-
-/*
- * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
- * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
- * completely disjoint from this data structure. tcaches starts off as a sparse
- * array, so it has no physical memory footprint until individual pages are
- * touched. This allows the entire array to be allocated the first time an
- * explicit tcache is created without a disproportionate impact on memory usage.
- */
-extern tcaches_t *tcaches;
-
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, bool *tcache_success);
-
-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem);
-void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
- szind_t binind, bool is_small);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
- tcache_t *tcache, arena_t *arena);
-tcache_t *tcache_create_explicit(tsd_t *tsd);
-void tcache_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
-void tcaches_flush(tsd_t *tsd, unsigned ind);
-void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn, base_t *base);
-void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
- tcache_t *tcache, arena_t *arena);
-void tcache_prefork(tsdn_t *tsdn);
-void tcache_postfork_parent(tsdn_t *tsdn);
-void tcache_postfork_child(tsdn_t *tsdn);
-void tcache_flush(tsd_t *tsd);
-bool tsd_tcache_data_init(tsd_t *tsd);
-bool tsd_tcache_enabled_data_init(tsd_t *tsd);
-
-void tcache_assert_initialized(tcache_t *tcache);
-
-/* Only accessed by thread event. */
-uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
-uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
-void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
-uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
-uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
-void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
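From the application side, the explicit tcaches described above are driven through jemalloc's public non-standard interface (the tcache.* mallctls and the MALLOCX_TCACHE() flag mentioned in the comment). Roughly, assuming an unprefixed public API and with error handling trimmed:

#include <jemalloc/jemalloc.h>

/* Create an explicit tcache, allocate through it, then tear it down. */
int
use_explicit_tcache(void) {
        unsigned tci;
        size_t sz = sizeof(tci);
        if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
                return -1;
        }
        void *p = mallocx(64, MALLOCX_TCACHE(tci));
        if (p != NULL) {
                dallocx(p, MALLOCX_TCACHE(tci));
        }
        mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
        mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
        return 0;
}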
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_inlines.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_inlines.h
deleted file mode 100644
index 2634f145..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_inlines.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
-#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
-
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/san.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/util.h"
-
-static inline bool
-tcache_enabled_get(tsd_t *tsd) {
- return tsd_tcache_enabled_get(tsd);
-}
-
-static inline void
-tcache_enabled_set(tsd_t *tsd, bool enabled) {
- bool was_enabled = tsd_tcache_enabled_get(tsd);
-
- if (!was_enabled && enabled) {
- tsd_tcache_data_init(tsd);
- } else if (was_enabled && !enabled) {
- tcache_cleanup(tsd);
- }
- /* Commit the state last. Above calls check current state. */
- tsd_tcache_enabled_set(tsd, enabled);
- tsd_slow_update(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
- assert(ind < SC_NBINS);
- bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
- if (ret && bin != NULL) {
- /* small size class but cache bin disabled. */
- assert(ind >= nhbins);
- assert((uintptr_t)(*bin->stack_head) ==
- cache_bin_preceding_junk);
- }
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t binind, bool zero, bool slow_path) {
- void *ret;
- bool tcache_success;
-
- assert(binind < SC_NBINS);
- cache_bin_t *bin = &tcache->bins[binind];
- ret = cache_bin_alloc(bin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- bool tcache_hard_success;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL)) {
- return NULL;
- }
- if (unlikely(tcache_small_bin_disabled(binind, bin))) {
- /* stats and zero are handled directly by the arena. */
- return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
- binind, zero);
- }
- tcache_bin_flush_stashed(tsd, tcache, bin, binind,
- /* is_small */ true);
-
- ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
- bin, binind, &tcache_hard_success);
- if (tcache_hard_success == false) {
- return NULL;
- }
- }
-
- assert(ret);
- if (unlikely(zero)) {
- size_t usize = sz_index2size(binind);
- assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- memset(ret, 0, usize);
- }
- if (config_stats) {
- bin->tstats.nrequests++;
- }
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path) {
- void *ret;
- bool tcache_success;
-
- assert(binind >= SC_NBINS && binind < nhbins);
- cache_bin_t *bin = &tcache->bins[binind];
- ret = cache_bin_alloc(bin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- /*
- * Only allocate one large object at a time, because it's quite
- * expensive to create one and not use it.
- */
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL)) {
- return NULL;
- }
- tcache_bin_flush_stashed(tsd, tcache, bin, binind,
- /* is_small */ false);
-
- ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
- if (ret == NULL) {
- return NULL;
- }
- } else {
- if (unlikely(zero)) {
- size_t usize = sz_index2size(binind);
- assert(usize <= tcache_maxclass);
- memset(ret, 0, usize);
- }
-
- if (config_stats) {
- bin->tstats.nrequests++;
- }
- }
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path) {
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
-
- cache_bin_t *bin = &tcache->bins[binind];
- /*
- * Not marking the branch unlikely because this is past free_fastpath()
- * (which handles the most common cases), i.e. at this point we are mostly
- * dealing with the less common cases.
- */
- if (cache_bin_nonfast_aligned(ptr)) {
- /* Junk unconditionally, even if bin is full. */
- san_junk_ptr(ptr, sz_index2size(binind));
- if (cache_bin_stash(bin, ptr)) {
- return;
- }
- assert(cache_bin_full(bin));
- /* Bin full; fall through into the flush branch. */
- }
-
- if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
- if (unlikely(tcache_small_bin_disabled(binind, bin))) {
- arena_dalloc_small(tsd_tsdn(tsd), ptr);
- return;
- }
- cache_bin_sz_t max = cache_bin_info_ncached_max(
- &tcache_bin_info[binind]);
- unsigned remain = max >> opt_lg_tcache_flush_small_div;
- tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
- bool ret = cache_bin_dalloc_easy(bin, ptr);
- assert(ret);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path) {
-
- assert(tcache_salloc(tsd_tsdn(tsd), ptr)
- > SC_SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
-
- cache_bin_t *bin = &tcache->bins[binind];
- if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
- unsigned remain = cache_bin_info_ncached_max(
- &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
- tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
- bool ret = cache_bin_dalloc_easy(bin, ptr);
- assert(ret);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcaches_get(tsd_t *tsd, unsigned ind) {
- tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL)) {
- malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
- abort();
- } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
- elm->tcache = tcache_create_explicit(tsd);
- }
- return elm->tcache;
-}
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
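For context, the enable/disable and flush paths above are normally reached through jemalloc's public mallctl interface rather than by calling these internal inlines directly. A minimal sketch, assuming jemalloc is linked and exposes the documented "thread.tcache.enabled" and "thread.tcache.flush" mallctl names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Read the calling thread's current tcache state. */
    bool enabled;
    size_t sz = sizeof(enabled);
    if (mallctl("thread.tcache.enabled", &enabled, &sz, NULL, 0) != 0) {
        fprintf(stderr, "mallctl read failed\n");
        return 1;
    }
    printf("tcache enabled: %d\n", (int)enabled);

    /* Toggle the tcache off and back on; internally this ends up in
     * tcache_enabled_set() above. */
    bool off = false, on = true;
    mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
    mallctl("thread.tcache.enabled", NULL, NULL, &on, sizeof(on));

    /* Flush the thread's cached objects without disabling the cache. */
    mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
    return 0;
}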
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_structs.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_structs.h
deleted file mode 100644
index 176d73de..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_structs.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
-
-#include "jemalloc/internal/cache_bin.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/ticker.h"
-#include "jemalloc/internal/tsd_types.h"
-
-/*
- * The tcache state is split into the slow and hot path data. Each has a
- * pointer to the other, and the data always comes in pairs. The layout of each
- * of them varies in practice; tcache_slow lives in the TSD for the automatic
- * tcache, and as part of a dynamic allocation for manual allocations. Keeping
- * a pointer to tcache_slow lets us treat these cases uniformly, rather than
- * splitting up the tcache [de]allocation code into those paths called with the
- * TSD tcache and those called with a manual tcache.
- */
-
-struct tcache_slow_s {
- /* Lets us track all the tcaches in an arena. */
- ql_elm(tcache_slow_t) link;
-
- /*
- * The descriptor lets the arena find our cache bins without seeing the
- * tcache definition. This enables arenas to aggregate stats across
- * tcaches without having a tcache dependency.
- */
- cache_bin_array_descriptor_t cache_bin_array_descriptor;
-
- /* The arena this tcache is associated with. */
- arena_t *arena;
- /* Next bin to GC. */
- szind_t next_gc_bin;
- /* For small bins, fill (ncached_max >> lg_fill_div). */
- uint8_t lg_fill_div[SC_NBINS];
- /* For small bins, whether the bin was refilled since the last GC. */
- bool bin_refilled[SC_NBINS];
- /*
- * For small bins, the number of items we can pretend to flush before
- * actually flushing.
- */
- uint8_t bin_flush_delay_items[SC_NBINS];
- /*
- * The start of the allocation containing the dynamic allocation for
- * either the cache bins alone, or the cache bin memory as well as this
- * tcache_slow_t and its associated tcache_t.
- */
- void *dyn_alloc;
-
- /* The associated bins. */
- tcache_t *tcache;
-};
-
-struct tcache_s {
- tcache_slow_t *tcache_slow;
- cache_bin_t bins[TCACHE_NBINS_MAX];
-};
-
-/* Linkage for list of available (previously used) explicit tcache IDs. */
-struct tcaches_s {
- union {
- tcache_t *tcache;
- tcaches_t *next;
- };
-};
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_types.h
deleted file mode 100644
index 583677ea..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tcache_types.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
-#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
-
-#include "jemalloc/internal/sc.h"
-
-typedef struct tcache_slow_s tcache_slow_t;
-typedef struct tcache_s tcache_t;
-typedef struct tcaches_s tcaches_t;
-
-/*
- * tcache pointers close to NULL encode state information that serves two
- * purposes: preventing thread caching on a per-thread basis and
- * cleaning up during thread shutdown.
- */
-#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
-#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
-#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
-#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
-
-/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
-#define TCACHE_ZERO_INITIALIZER {0}
-#define TCACHE_SLOW_ZERO_INITIALIZER {0}
-
-/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
-#define TCACHE_ENABLED_ZERO_INITIALIZER false
-
-/* Used for explicit tcache only. Means flushed but not destroyed. */
-#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
-
-#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
-#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
-#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
- (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
-
-#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/test_hooks.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/test_hooks.h
deleted file mode 100644
index 3d530b5c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/test_hooks.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
-#define JEMALLOC_INTERNAL_TEST_HOOKS_H
-
-extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
-extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
-
-#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
-# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
-
-# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook)
-# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook)
-# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook)
-# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook)
-# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook)
-# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook)
-# define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook)
-/* Note that this is undef'd and re-define'd in src/prof.c. */
-# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
-#else
-# define JEMALLOC_TEST_HOOK(fn, hook) fn
-#endif
-
-
-#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
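To make the comma-operator trick above concrete, here is an illustrative expansion of a libc call when JEMALLOC_JET is defined (fd, path, and O_RDONLY are placeholders, not part of the header): the hook fires first, if one is installed, and the expression then evaluates to the real function, which is called with the original arguments.

/* Source as written: */
fd = open(path, O_RDONLY);

/* After preprocessing with JEMALLOC_TEST_HOOK: */
fd = ((void)(test_hooks_libc_hook != NULL &&
    (test_hooks_libc_hook(), 0)), open)(path, O_RDONLY);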
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/thread_event.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/thread_event.h
deleted file mode 100644
index 2f4e1b39..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/thread_event.h
+++ /dev/null
@@ -1,301 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
-#define JEMALLOC_INTERNAL_THREAD_EVENT_H
-
-#include "jemalloc/internal/tsd.h"
-
-/* "te" is short for "thread_event" */
-
-/*
- * TE_MIN_START_WAIT should not exceed the minimal allocation usize.
- */
-#define TE_MIN_START_WAIT ((uint64_t)1U)
-#define TE_MAX_START_WAIT UINT64_MAX
-
-/*
- * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
- * no need to check overflow in malloc fast path. (The allocation size in malloc
- * fast path never exceeds SC_LOOKUP_MAXCLASS.)
- */
-#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)
-
-/*
- * The max interval helps make sure that malloc stays on the fast path in the
- * common case, i.e. thread_allocated < thread_allocated_next_event_fast. When
- * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX
- * above, thread_allocated_next_event_fast is wrapped around and we fall back to
- * the medium-fast path. The max interval makes sure that we're not staying on
- * the fallback case for too long, even if there's no active event or if all
- * active events have long wait times.
- */
-#define TE_MAX_INTERVAL ((uint64_t)(4U << 20))
-
-/*
- * Invalid elapsed time, for situations where elapsed time is not needed. See
- * comments in thread_event.c for more info.
- */
-#define TE_INVALID_ELAPSED UINT64_MAX
-
-typedef struct te_ctx_s {
- bool is_alloc;
- uint64_t *current;
- uint64_t *last_event;
- uint64_t *next_event;
- uint64_t *next_event_fast;
-} te_ctx_t;
-
-void te_assert_invariants_debug(tsd_t *tsd);
-void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx);
-void te_recompute_fast_threshold(tsd_t *tsd);
-void tsd_te_init(tsd_t *tsd);
-
-/*
- * List of all events, in the following format:
- * E(event, (condition), is_alloc_event)
- */
-#define ITERATE_OVER_ALL_EVENTS \
- E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \
- E(prof_sample, (config_prof && opt_prof), true) \
- E(stats_interval, (opt_stats_interval >= 0), true) \
- E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \
- E(peak_alloc, config_stats, true) \
- E(peak_dalloc, config_stats, false)
-
-#define E(event, condition_unused, is_alloc_event_unused) \
- C(event##_event_wait)
-
-/* List of all thread event counters. */
-#define ITERATE_OVER_ALL_COUNTERS \
- C(thread_allocated) \
- C(thread_allocated_last_event) \
- ITERATE_OVER_ALL_EVENTS \
- C(prof_sample_last_event) \
- C(stats_interval_last_event)
-
-/* Getters directly wrap TSD getters. */
-#define C(counter) \
-JEMALLOC_ALWAYS_INLINE uint64_t \
-counter##_get(tsd_t *tsd) { \
- return tsd_##counter##_get(tsd); \
-}
-
-ITERATE_OVER_ALL_COUNTERS
-#undef C
-
-/*
- * Setters call the TSD pointer getters rather than the TSD setters, so that
- * the counters can be modified even when TSD state is reincarnated or
- * minimal_initialized: if an event is triggered in such cases, we will
- * temporarily delay the event and let it be immediately triggered at the next
- * allocation call.
- */
-#define C(counter) \
-JEMALLOC_ALWAYS_INLINE void \
-counter##_set(tsd_t *tsd, uint64_t v) { \
- *tsd_##counter##p_get(tsd) = v; \
-}
-
-ITERATE_OVER_ALL_COUNTERS
-#undef C
-
-/*
- * For generating _event_wait getter / setter functions for each individual
- * event.
- */
-#undef E
-
-/*
- * The malloc and free fastpath getters -- use the unsafe getters since tsd may
- * be non-nominal, in which case the fast_threshold will be set to 0. This
- * allows checking for events and tsd non-nominal in a single branch.
- *
- * Note that these can only be used on the fastpath.
- */
-JEMALLOC_ALWAYS_INLINE void
-te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) {
- *allocated = *tsd_thread_allocatedp_get_unsafe(tsd);
- *threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
- assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) {
- /* Unsafe getters since this may happen before tsd_init. */
- *deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
- *threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
- assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-te_ctx_is_alloc(te_ctx_t *ctx) {
- return ctx->is_alloc;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-te_ctx_current_bytes_get(te_ctx_t *ctx) {
- return *ctx->current;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) {
- *ctx->current = v;
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-te_ctx_last_event_get(te_ctx_t *ctx) {
- return *ctx->last_event;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) {
- *ctx->last_event = v;
-}
-
-/* Below 3 for next_event_fast. */
-JEMALLOC_ALWAYS_INLINE uint64_t
-te_ctx_next_event_fast_get(te_ctx_t *ctx) {
- uint64_t v = *ctx->next_event_fast;
- assert(v <= TE_NEXT_EVENT_FAST_MAX);
- return v;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) {
- assert(v <= TE_NEXT_EVENT_FAST_MAX);
- *ctx->next_event_fast = v;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_next_event_fast_set_non_nominal(tsd_t *tsd) {
- /*
- * Set the fast thresholds to zero when tsd is non-nominal. Use the
- * unsafe getter as this may get called during tsd init and clean up.
- */
- *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
- *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
-}
-
-/* For next_event. Setter also updates the fast threshold. */
-JEMALLOC_ALWAYS_INLINE uint64_t
-te_ctx_next_event_get(te_ctx_t *ctx) {
- return *ctx->next_event;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) {
- *ctx->next_event = v;
- te_recompute_fast_threshold(tsd);
-}
-
-/*
- * The function checks in debug mode whether the thread event counters are in
- * a consistent state, which forms the invariants before and after each round
- * of thread event handling that we can rely on and need to promise.
- * The invariants are only temporarily violated in the middle of
- * te_event_advance() if an event is triggered (the te_event_trigger() call at
- * the end will restore the invariants).
- */
-JEMALLOC_ALWAYS_INLINE void
-te_assert_invariants(tsd_t *tsd) {
- if (config_debug) {
- te_assert_invariants_debug(tsd);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
- ctx->is_alloc = is_alloc;
- if (is_alloc) {
- ctx->current = tsd_thread_allocatedp_get(tsd);
- ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
- ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
- ctx->next_event_fast =
- tsd_thread_allocated_next_event_fastp_get(tsd);
- } else {
- ctx->current = tsd_thread_deallocatedp_get(tsd);
- ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
- ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
- ctx->next_event_fast =
- tsd_thread_deallocated_next_event_fastp_get(tsd);
- }
-}
-
-/*
- * The lookahead functionality lets an event determine, without touching the
- * event counters, whether it would be triggered. The event counters are not
- * advanced until the end of the
- * allocation / deallocation calls, so the lookahead can be useful if some
- * preparation work for some event must be done early in the allocation /
- * deallocation calls.
- *
- * Currently only the profiling sampling event needs the lookahead
- * functionality, so we don't yet define general purpose lookahead functions.
- *
- * "Surplus" refers to the number of bytes beyond what's needed to trigger an
- * event; it is generally a useful quantity to have when lookahead is being
- * called.
- */
-
-JEMALLOC_ALWAYS_INLINE bool
-te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
- size_t *surplus) {
- if (surplus != NULL) {
- /*
- * This is a dead store: the surplus will be overwritten before
- * any read. The initialization suppresses compiler warnings.
- * Meanwhile, using SIZE_MAX to initialize is good for
- * debugging purpose, because a valid surplus value is strictly
- * less than usize, which is at most SIZE_MAX.
- */
- *surplus = SIZE_MAX;
- }
- if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
- return false;
- }
- /* The subtraction is intentionally susceptible to underflow. */
- uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
- tsd_thread_allocated_last_event_get(tsd);
- uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
- if (accumbytes < sample_wait) {
- return false;
- }
- assert(accumbytes - sample_wait < (uint64_t)usize);
- if (surplus != NULL) {
- *surplus = (size_t)(accumbytes - sample_wait);
- }
- return true;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
- return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
- te_assert_invariants(tsd);
-
- te_ctx_t ctx;
- te_ctx_get(tsd, &ctx, is_alloc);
-
- uint64_t bytes_before = te_ctx_current_bytes_get(&ctx);
- te_ctx_current_bytes_set(&ctx, bytes_before + usize);
-
- /* The subtraction is intentionally susceptible to underflow. */
- if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) {
- te_assert_invariants(tsd);
- } else {
- te_event_trigger(tsd, &ctx);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-thread_dalloc_event(tsd_t *tsd, size_t usize) {
- te_event_advance(tsd, usize, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-thread_alloc_event(tsd_t *tsd, size_t usize) {
- te_event_advance(tsd, usize, true);
-}
-
-#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
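The comparison in te_event_advance() relies on the subtraction being performed in uint64_t, so a single unsigned compare covers both the common no-event case and the trigger case. A standalone sketch of that wrap-around comparison (not jemalloc code; the numbers are arbitrary):

#include <stdint.h>
#include <stdio.h>

static int
event_would_trigger(uint64_t bytes_before, uint64_t next_event, uint64_t usize) {
    /* Mirrors "usize < te_ctx_next_event_get(&ctx) - bytes_before" above:
     * the event triggers exactly when bytes_before + usize reaches
     * next_event, and the intentional underflow keeps the check to a
     * single branch. */
    return !(usize < next_event - bytes_before);
}

int main(void) {
    printf("%d\n", event_would_trigger(100, 200, 50));  /* 0: 100 bytes short */
    printf("%d\n", event_would_trigger(100, 200, 100)); /* 1: reaches the threshold */
    printf("%d\n", event_would_trigger(100, 200, 150)); /* 1: crosses the threshold */
    return 0;
}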
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ticker.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ticker.h
deleted file mode 100644
index 6b51ddec..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/ticker.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TICKER_H
-#define JEMALLOC_INTERNAL_TICKER_H
-
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/util.h"
-
-/**
- * A ticker makes it easy to count-down events until some limit. You
- * ticker_init the ticker to trigger every nticks events. You then notify it
- * that an event has occurred with calls to ticker_tick (or that nticks events
- * have occurred with a call to ticker_ticks), which will return true (and reset
- * the counter) if the countdown hit zero.
- */
-typedef struct ticker_s ticker_t;
-struct ticker_s {
- int32_t tick;
- int32_t nticks;
-};
-
-static inline void
-ticker_init(ticker_t *ticker, int32_t nticks) {
- ticker->tick = nticks;
- ticker->nticks = nticks;
-}
-
-static inline void
-ticker_copy(ticker_t *ticker, const ticker_t *other) {
- *ticker = *other;
-}
-
-static inline int32_t
-ticker_read(const ticker_t *ticker) {
- return ticker->tick;
-}
-
-/*
- * Not intended to be a public API. Unfortunately, on x86, neither gcc nor
- * clang seems smart enough to turn
- * ticker->tick -= nticks;
- * if (unlikely(ticker->tick < 0)) {
- * fixup ticker
- * return true;
- * }
- * return false;
- * into
- * subq %nticks_reg, (%ticker_reg)
- * js fixup ticker
- *
- * unless we force "fixup ticker" out of line. In that case, gcc gets it right,
- * but clang now does worse than before. So, on x86 with gcc, we force it out
- * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
- * worth the hassle, but this is on the fast path of both malloc and free (via
- * tcache_event).
- */
-#if defined(__GNUC__) && !defined(__clang__) \
- && (defined(__x86_64__) || defined(__i386__))
-JEMALLOC_NOINLINE
-#endif
-static bool
-ticker_fixup(ticker_t *ticker) {
- ticker->tick = ticker->nticks;
- return true;
-}
-
-static inline bool
-ticker_ticks(ticker_t *ticker, int32_t nticks) {
- ticker->tick -= nticks;
- if (unlikely(ticker->tick < 0)) {
- return ticker_fixup(ticker);
- }
- return false;
-}
-
-static inline bool
-ticker_tick(ticker_t *ticker) {
- return ticker_ticks(ticker, 1);
-}
-
-/*
- * Try to tick. If ticker would fire, return true, but rely on
- * slowpath to reset ticker.
- */
-static inline bool
-ticker_trytick(ticker_t *ticker) {
- --ticker->tick;
- if (unlikely(ticker->tick < 0)) {
- return true;
- }
- return false;
-}
-
-/*
- * The ticker_geom_t is much like the ticker_t, except that instead of ticker
- * having a constant countdown, it has an approximate one; each tick has
- * approximately a 1/nticks chance of triggering the count.
- *
- * The motivation is in triggering arena decay. With a naive strategy, each
- * thread would maintain a ticker per arena, and check if decay is necessary
- * each time that the arena's ticker fires. This has two costs:
- * - Since under reasonable assumptions both threads and arenas can scale
- * linearly with the number of CPUs, maintaining per-arena data in each thread
- * scales quadratically with the number of CPUs.
- * - These tickers are often a cache miss down tcache flush pathways.
- *
- * By giving each tick a 1/nticks chance of firing, we still maintain the same
- * average number of ticks-until-firing per arena, with only a single ticker's
- * worth of metadata.
- */
-
-/* See ticker.c for an explanation of these constants. */
-#define TICKER_GEOM_NBITS 6
-#define TICKER_GEOM_MUL 61
-extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS];
-
-/* Not actually any different from ticker_t; just for type safety. */
-typedef struct ticker_geom_s ticker_geom_t;
-struct ticker_geom_s {
- int32_t tick;
- int32_t nticks;
-};
-
-/*
- * Just pick the average delay for the first counter. We're more concerned with
- * the behavior over long periods of time than with the exact timing of the
- * initial ticks.
- */
-#define TICKER_GEOM_INIT(nticks) {nticks, nticks}
-
-static inline void
-ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) {
- /*
- * Make sure there's no overflow possible. This shouldn't really be a
- * problem for reasonable nticks choices, which are all static and
- * relatively small.
- */
- assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL
- <= (uint64_t)INT32_MAX);
- ticker->tick = nticks;
- ticker->nticks = nticks;
-}
-
-static inline int32_t
-ticker_geom_read(const ticker_geom_t *ticker) {
- return ticker->tick;
-}
-
-/* Same deal as above. */
-#if defined(__GNUC__) && !defined(__clang__) \
- && (defined(__x86_64__) || defined(__i386__))
-JEMALLOC_NOINLINE
-#endif
-static bool
-ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) {
- uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS);
- ticker->tick = (uint32_t)(
- (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx]
- / (uint64_t)TICKER_GEOM_MUL);
- return true;
-}
-
-static inline bool
-ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) {
- ticker->tick -= nticks;
- if (unlikely(ticker->tick < 0)) {
- return ticker_geom_fixup(ticker, prng_state);
- }
- return false;
-}
-
-static inline bool
-ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) {
- return ticker_geom_ticks(ticker, prng_state, 1);
-}
-
-#endif /* JEMALLOC_INTERNAL_TICKER_H */
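The intended usage pattern is: initialize a ticker with its period, call ticker_tick() on every event, and do the periodic work whenever it returns true. A self-contained restatement of that pattern (a simplified ticker with the same semantics, not the header above, so it compiles on its own):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int32_t tick;
    int32_t nticks;
} demo_ticker_t;

static void
demo_ticker_init(demo_ticker_t *t, int32_t nticks) {
    t->tick = nticks;
    t->nticks = nticks;
}

static bool
demo_ticker_tick(demo_ticker_t *t) {
    /* Fire (and reset the countdown) once the counter goes negative. */
    if (--t->tick < 0) {
        t->tick = t->nticks;
        return true;
    }
    return false;
}

int main(void) {
    demo_ticker_t t;
    demo_ticker_init(&t, 3);
    for (int i = 1; i <= 10; i++) {
        if (demo_ticker_tick(&t)) {
            printf("event %d: do periodic work\n", i);
        }
    }
    return 0;
}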
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd.h
deleted file mode 100644
index 66d68822..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd.h
+++ /dev/null
@@ -1,518 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_H
-#define JEMALLOC_INTERNAL_TSD_H
-
-#include "jemalloc/internal/activity_callback.h"
-#include "jemalloc/internal/arena_types.h"
-#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/peak.h"
-#include "jemalloc/internal/prof_types.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/rtree_tsd.h"
-#include "jemalloc/internal/tcache_types.h"
-#include "jemalloc/internal/tcache_structs.h"
-#include "jemalloc/internal/util.h"
-#include "jemalloc/internal/witness.h"
-
-/*
- * Thread-Specific-Data layout
- *
- * At least some thread-local data gets touched on the fast-path of almost all
- * malloc operations. But much of it is only necessary down slow paths or for
- * testing. We want to colocate the fast-path data so that it can live on the
- * same cacheline if possible. So we define three tiers of hotness:
- * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths.
- * TSD_DATA_SLOW: Touched down slow paths. "Slow" here is sort of general;
- * there are "semi-slow" paths like "not a sized deallocation, but can still
- * live in the tcache". We'll want to keep these closer to the fast-path
- * data.
- * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all.
- *
- * An additional concern is that the larger tcache bins won't be used (we have a
- * bin per size class, but by default only cache relatively small objects). So
- * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the
- * TSD_DATA_SLOWER tier.
- *
- * As a result of all this, we put the slow data first, then the fast data, then
- * the slower data, while keeping the tcache as the last element of the fast
- * data (so that the fast -> slower transition happens midway through the
- * tcache). While we don't yet play alignment tricks to guarantee it, this
- * increases our odds of getting some cache/page locality on fast paths.
- */
-
-#ifdef JEMALLOC_JET
-typedef void (*test_callback_t)(int *);
-# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
-# define MALLOC_TEST_TSD \
- O(test_data, int, int) \
- O(test_callback, test_callback_t, int)
-# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
-#else
-# define MALLOC_TEST_TSD
-# define MALLOC_TEST_TSD_INITIALIZER
-#endif
-
-typedef ql_elm(tsd_t) tsd_link_t;
-
-/* O(name, type, nullable type) */
-#define TSD_DATA_SLOW \
- O(tcache_enabled, bool, bool) \
- O(reentrancy_level, int8_t, int8_t) \
- O(thread_allocated_last_event, uint64_t, uint64_t) \
- O(thread_allocated_next_event, uint64_t, uint64_t) \
- O(thread_deallocated_last_event, uint64_t, uint64_t) \
- O(thread_deallocated_next_event, uint64_t, uint64_t) \
- O(tcache_gc_event_wait, uint64_t, uint64_t) \
- O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \
- O(prof_sample_event_wait, uint64_t, uint64_t) \
- O(prof_sample_last_event, uint64_t, uint64_t) \
- O(stats_interval_event_wait, uint64_t, uint64_t) \
- O(stats_interval_last_event, uint64_t, uint64_t) \
- O(peak_alloc_event_wait, uint64_t, uint64_t) \
- O(peak_dalloc_event_wait, uint64_t, uint64_t) \
- O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
- O(prng_state, uint64_t, uint64_t) \
- O(san_extents_until_guard_small, uint64_t, uint64_t) \
- O(san_extents_until_guard_large, uint64_t, uint64_t) \
- O(iarena, arena_t *, arena_t *) \
- O(arena, arena_t *, arena_t *) \
- O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \
- O(sec_shard, uint8_t, uint8_t) \
- O(binshards, tsd_binshards_t, tsd_binshards_t)\
- O(tsd_link, tsd_link_t, tsd_link_t) \
- O(in_hook, bool, bool) \
- O(peak, peak_t, peak_t) \
- O(activity_callback_thunk, activity_callback_thunk_t, \
- activity_callback_thunk_t) \
- O(tcache_slow, tcache_slow_t, tcache_slow_t) \
- O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
-
-#define TSD_DATA_SLOW_INITIALIZER \
- /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
- /* reentrancy_level */ 0, \
- /* thread_allocated_last_event */ 0, \
- /* thread_allocated_next_event */ 0, \
- /* thread_deallocated_last_event */ 0, \
- /* thread_deallocated_next_event */ 0, \
- /* tcache_gc_event_wait */ 0, \
- /* tcache_gc_dalloc_event_wait */ 0, \
- /* prof_sample_event_wait */ 0, \
- /* prof_sample_last_event */ 0, \
- /* stats_interval_event_wait */ 0, \
- /* stats_interval_last_event */ 0, \
- /* peak_alloc_event_wait */ 0, \
- /* peak_dalloc_event_wait */ 0, \
- /* prof_tdata */ NULL, \
- /* prng_state */ 0, \
- /* san_extents_until_guard_small */ 0, \
- /* san_extents_until_guard_large */ 0, \
- /* iarena */ NULL, \
- /* arena */ NULL, \
- /* arena_decay_ticker */ \
- TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
- /* sec_shard */ (uint8_t)-1, \
- /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
- /* tsd_link */ {NULL}, \
- /* in_hook */ false, \
- /* peak */ PEAK_INITIALIZER, \
- /* activity_callback_thunk */ \
- ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
- /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
- /* rtree_ctx */ RTREE_CTX_INITIALIZER,
-
-/* O(name, type, nullable type) */
-#define TSD_DATA_FAST \
- O(thread_allocated, uint64_t, uint64_t) \
- O(thread_allocated_next_event_fast, uint64_t, uint64_t) \
- O(thread_deallocated, uint64_t, uint64_t) \
- O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \
- O(tcache, tcache_t, tcache_t)
-
-#define TSD_DATA_FAST_INITIALIZER \
- /* thread_allocated */ 0, \
- /* thread_allocated_next_event_fast */ 0, \
- /* thread_deallocated */ 0, \
- /* thread_deallocated_next_event_fast */ 0, \
- /* tcache */ TCACHE_ZERO_INITIALIZER,
-
-/* O(name, type, nullable type) */
-#define TSD_DATA_SLOWER \
- O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
- MALLOC_TEST_TSD
-
-#define TSD_DATA_SLOWER_INITIALIZER \
- /* witness */ WITNESS_TSD_INITIALIZER \
- /* test data */ MALLOC_TEST_TSD_INITIALIZER
-
-
-#define TSD_INITIALIZER { \
- TSD_DATA_SLOW_INITIALIZER \
- /* state */ ATOMIC_INIT(tsd_state_uninitialized), \
- TSD_DATA_FAST_INITIALIZER \
- TSD_DATA_SLOWER_INITIALIZER \
-}
-
-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
-void _malloc_tsd_cleanup_register(bool (*f)(void));
-#endif
-
-void *malloc_tsd_malloc(size_t size);
-void malloc_tsd_dalloc(void *wrapper);
-tsd_t *malloc_tsd_boot0(void);
-void malloc_tsd_boot1(void);
-void tsd_cleanup(void *arg);
-tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
-void tsd_state_set(tsd_t *tsd, uint8_t new_state);
-void tsd_slow_update(tsd_t *tsd);
-void tsd_prefork(tsd_t *tsd);
-void tsd_postfork_parent(tsd_t *tsd);
-void tsd_postfork_child(tsd_t *tsd);
-
-/*
- * Call ..._inc when your module wants to take all threads down the slow paths,
- * and ..._dec when it no longer needs to.
- */
-void tsd_global_slow_inc(tsdn_t *tsdn);
-void tsd_global_slow_dec(tsdn_t *tsdn);
-bool tsd_global_slow();
-
-enum {
- /* Common case --> jnz. */
- tsd_state_nominal = 0,
- /* Initialized but on slow path. */
- tsd_state_nominal_slow = 1,
- /*
- * Some thread has changed global state in such a way that all nominal
- * threads need to recompute their fast / slow status the next time they
- * get a chance.
- *
- * Any thread can change another thread's status *to* recompute, but
- * threads are the only ones who can change their status *from*
- * recompute.
- */
- tsd_state_nominal_recompute = 2,
- /*
- * The above nominal states should be lower values. We use
- * tsd_nominal_max to separate nominal states from threads in the
- * process of being born / dying.
- */
- tsd_state_nominal_max = 2,
-
- /*
- * A thread might free() during its death as its only allocator action;
- * in such scenarios, we need tsd, but set up in such a way that no
- * cleanup is necessary.
- */
- tsd_state_minimal_initialized = 3,
- /* States during which we know we're in thread death. */
- tsd_state_purgatory = 4,
- tsd_state_reincarnated = 5,
- /*
- * What it says on the tin; tsd that hasn't been initialized. Note
- * that even when the tsd struct lives in TLS, we need to keep track
- * of stuff like whether or not our pthread destructors have been
- * scheduled, so this really truly is different than the nominal state.
- */
- tsd_state_uninitialized = 6
-};
-
-/*
- * Some TSD accesses can only be done in a nominal state. To enforce this, we
- * wrap TSD member access in a function that asserts on TSD state, and mangle
- * field names to prevent touching them accidentally.
- */
-#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
-
-#ifdef JEMALLOC_U8_ATOMICS
-# define tsd_state_t atomic_u8_t
-# define tsd_atomic_load atomic_load_u8
-# define tsd_atomic_store atomic_store_u8
-# define tsd_atomic_exchange atomic_exchange_u8
-#else
-# define tsd_state_t atomic_u32_t
-# define tsd_atomic_load atomic_load_u32
-# define tsd_atomic_store atomic_store_u32
-# define tsd_atomic_exchange atomic_exchange_u32
-#endif
-
-/* The actual tsd. */
-struct tsd_s {
- /*
- * The contents should be treated as totally opaque outside the tsd
- * module. Access any thread-local state through the getters and
- * setters below.
- */
-
-#define O(n, t, nt) \
- t TSD_MANGLE(n);
-
- TSD_DATA_SLOW
- /*
- * We manually limit the state to just a single byte. Unless the 8-bit
- * atomics are unavailable (which is rare).
- */
- tsd_state_t state;
- TSD_DATA_FAST
- TSD_DATA_SLOWER
-#undef O
-};
-
-JEMALLOC_ALWAYS_INLINE uint8_t
-tsd_state_get(tsd_t *tsd) {
- /*
- * This should be atomic. Unfortunately, compilers right now can't tell
- * that this can be done as a memory comparison; they force a load into
- * a register, which hurts fast-path performance.
- */
- /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
- return *(uint8_t *)&tsd->state;
-}
-
-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
- tsd_t tsd;
-};
-#define TSDN_NULL ((tsdn_t *)0)
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd) {
- return (tsdn_t *)tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn) {
- return tsdn == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn) {
- assert(!tsdn_null(tsdn));
-
- return &tsdn->tsd;
-}
-
-/*
- * We put the platform-specific data declarations and inlines into their own
- * header files to avoid cluttering this file. They define tsd_boot0,
- * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
- */
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
-#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
-#elif (defined(JEMALLOC_TLS))
-#include "jemalloc/internal/tsd_tls.h"
-#elif (defined(_WIN32))
-#include "jemalloc/internal/tsd_win.h"
-#else
-#include "jemalloc/internal/tsd_generic.h"
-#endif
-
-/*
- * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
- * foo. This omits some safety checks, and so can be used during tsd
- * initialization and cleanup.
- */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get_unsafe(tsd_t *tsd) { \
- return &tsd->TSD_MANGLE(n); \
-}
-TSD_DATA_SLOW
-TSD_DATA_FAST
-TSD_DATA_SLOWER
-#undef O
-
-/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get(tsd_t *tsd) { \
- /* \
- * Because the state might change asynchronously if it's \
- * nominal, we need to make sure that we only read it once. \
- */ \
- uint8_t state = tsd_state_get(tsd); \
- assert(state == tsd_state_nominal || \
- state == tsd_state_nominal_slow || \
- state == tsd_state_nominal_recompute || \
- state == tsd_state_reincarnated || \
- state == tsd_state_minimal_initialized); \
- return tsd_##n##p_get_unsafe(tsd); \
-}
-TSD_DATA_SLOW
-TSD_DATA_FAST
-TSD_DATA_SLOWER
-#undef O
-
-/*
- * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
- * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
- */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE nt * \
-tsdn_##n##p_get(tsdn_t *tsdn) { \
- if (tsdn_null(tsdn)) { \
- return NULL; \
- } \
- tsd_t *tsd = tsdn_tsd(tsdn); \
- return (nt *)tsd_##n##p_get(tsd); \
-}
-TSD_DATA_SLOW
-TSD_DATA_FAST
-TSD_DATA_SLOWER
-#undef O
-
-/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE t \
-tsd_##n##_get(tsd_t *tsd) { \
- return *tsd_##n##p_get(tsd); \
-}
-TSD_DATA_SLOW
-TSD_DATA_FAST
-TSD_DATA_SLOWER
-#undef O
-
-/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
-#define O(n, t, nt) \
-JEMALLOC_ALWAYS_INLINE void \
-tsd_##n##_set(tsd_t *tsd, t val) { \
- assert(tsd_state_get(tsd) != tsd_state_reincarnated && \
- tsd_state_get(tsd) != tsd_state_minimal_initialized); \
- *tsd_##n##p_get(tsd) = val; \
-}
-TSD_DATA_SLOW
-TSD_DATA_FAST
-TSD_DATA_SLOWER
-#undef O
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_assert_fast(tsd_t *tsd) {
- /*
- * Note that our fastness assertion does *not* include global slowness
- * counters; it's not in general possible to ensure that they won't
- * change asynchronously from underneath us.
- */
- assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
- tsd_reentrancy_level_get(tsd) == 0);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_fast(tsd_t *tsd) {
- bool fast = (tsd_state_get(tsd) == tsd_state_nominal);
- if (fast) {
- tsd_assert_fast(tsd);
- }
-
- return fast;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init, bool minimal) {
- tsd_t *tsd = tsd_get(init);
-
- if (!init && tsd_get_allocates() && tsd == NULL) {
- return NULL;
- }
- assert(tsd != NULL);
-
- if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) {
- return tsd_fetch_slow(tsd, minimal);
- }
- assert(tsd_fast(tsd));
- tsd_assert_fast(tsd);
-
- return tsd;
-}
-
-/* Get a minimal TSD that requires no cleanup. See comments in free(). */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_min(void) {
- return tsd_fetch_impl(true, true);
-}
-
-/* For internal background threads use only. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_internal_fetch(void) {
- tsd_t *tsd = tsd_fetch_min();
- /* Use reincarnated state to prevent full initialization. */
- tsd_state_set(tsd, tsd_state_reincarnated);
-
- return tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void) {
- return tsd_fetch_impl(true, false);
-}
-
-static inline bool
-tsd_nominal(tsd_t *tsd) {
- bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max;
- assert(nominal || tsd_reentrancy_level_get(tsd) > 0);
-
- return nominal;
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void) {
- if (!tsd_booted_get()) {
- return NULL;
- }
-
- return tsd_tsdn(tsd_fetch_impl(false, false));
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsd_rtree_ctx(tsd_t *tsd) {
- return tsd_rtree_ctxp_get(tsd);
-}
-
-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
- /*
- * If tsd cannot be accessed, initialize the fallback rtree_ctx and
- * return a pointer to it.
- */
- if (unlikely(tsdn_null(tsdn))) {
- rtree_ctx_data_init(fallback);
- return fallback;
- }
- return tsd_rtree_ctx(tsdn_tsd(tsdn));
-}
-
-static inline bool
-tsd_state_nocleanup(tsd_t *tsd) {
- return tsd_state_get(tsd) == tsd_state_reincarnated ||
- tsd_state_get(tsd) == tsd_state_minimal_initialized;
-}
-
-/*
- * These "raw" tsd reentrancy functions don't have any debug checking to make
- * sure that we're not touching arena 0. Better is to call pre_reentrancy and
- * post_reentrancy if this is possible.
- */
-static inline void
-tsd_pre_reentrancy_raw(tsd_t *tsd) {
- bool fast = tsd_fast(tsd);
- assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
- ++*tsd_reentrancy_levelp_get(tsd);
- if (fast) {
- /* Prepare slow path for reentrancy. */
- tsd_slow_update(tsd);
- assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
- }
-}
-
-static inline void
-tsd_post_reentrancy_raw(tsd_t *tsd) {
- int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
- assert(*reentrancy_level > 0);
- if (--*reentrancy_level == 0) {
- tsd_slow_update(tsd);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_TSD_H */
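The O()/#undef O pattern above is a classic X-macro: the field list expands once into mangled struct members and again into typed accessors, so each field is only ever named in one place. A self-contained miniature of the technique (field names and the "mini_" prefix are invented for illustration, with just two fields):

#include <stdint.h>
#include <stdio.h>

#define MANGLE(n) cant_access_directly_##n

/* The single source of truth for the fields: O(name, type). */
#define FIELD_LIST \
    O(thread_allocated, uint64_t) \
    O(reentrancy_level, int8_t)

/* First expansion: struct members with mangled names. */
typedef struct {
#define O(n, t) t MANGLE(n);
    FIELD_LIST
#undef O
} mini_tsd_t;

/* Second expansion: typed getters and setters per field. */
#define O(n, t) \
static inline t mini_##n##_get(mini_tsd_t *tsd) { return tsd->MANGLE(n); } \
static inline void mini_##n##_set(mini_tsd_t *tsd, t val) { tsd->MANGLE(n) = val; }
FIELD_LIST
#undef O

int main(void) {
    mini_tsd_t tsd = {0};
    mini_thread_allocated_set(&tsd, 4096);
    printf("%llu\n", (unsigned long long)mini_thread_allocated_get(&tsd));
    return 0;
}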
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_generic.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_generic.h
deleted file mode 100644
index a718472f..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_generic.h
+++ /dev/null
@@ -1,182 +0,0 @@
-#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_GENERIC_H
-
-typedef struct tsd_init_block_s tsd_init_block_t;
-struct tsd_init_block_s {
- ql_elm(tsd_init_block_t) link;
- pthread_t thread;
- void *data;
-};
-
-/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
-typedef struct tsd_init_head_s tsd_init_head_t;
-
-typedef struct {
- bool initialized;
- tsd_t val;
-} tsd_wrapper_t;
-
-void *tsd_init_check_recursion(tsd_init_head_t *head,
- tsd_init_block_t *block);
-void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
-
-extern pthread_key_t tsd_tsd;
-extern tsd_init_head_t tsd_init_head;
-extern tsd_wrapper_t tsd_boot_wrapper;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE void
-tsd_cleanup_wrapper(void *arg) {
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
-
- if (wrapper->initialized) {
- wrapper->initialized = false;
- tsd_cleanup(&wrapper->val);
- if (wrapper->initialized) {
- /* Trigger another cleanup round. */
- if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
- {
- malloc_write("<jemalloc>: Error setting TSD\n");
- if (opt_abort) {
- abort();
- }
- }
- return;
- }
- }
- malloc_tsd_dalloc(wrapper);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
- if (unlikely(!tsd_booted)) {
- return;
- }
- if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
- malloc_write("<jemalloc>: Error setting TSD\n");
- abort();
- }
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
-tsd_wrapper_get(bool init) {
- tsd_wrapper_t *wrapper;
-
- if (unlikely(!tsd_booted)) {
- return &tsd_boot_wrapper;
- }
-
- wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
-
- if (init && unlikely(wrapper == NULL)) {
- tsd_init_block_t block;
- wrapper = (tsd_wrapper_t *)
- tsd_init_check_recursion(&tsd_init_head, &block);
- if (wrapper) {
- return wrapper;
- }
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- block.data = (void *)wrapper;
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- } else {
- wrapper->initialized = false;
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
- tsd_t initializer = TSD_INITIALIZER;
- JEMALLOC_DIAGNOSTIC_POP
- wrapper->val = initializer;
- }
- tsd_wrapper_set(wrapper);
- tsd_init_finish(&tsd_init_head, &block);
- }
- return wrapper;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- tsd_wrapper_t *wrapper;
- tsd_init_block_t block;
-
- wrapper = (tsd_wrapper_t *)
- tsd_init_check_recursion(&tsd_init_head, &block);
- if (wrapper) {
- return false;
- }
- block.data = &tsd_boot_wrapper;
- if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
- return true;
- }
- tsd_booted = true;
- tsd_wrapper_set(&tsd_boot_wrapper);
- tsd_init_finish(&tsd_init_head, &block);
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- tsd_wrapper_t *wrapper;
- wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- }
- tsd_boot_wrapper.initialized = false;
- tsd_cleanup(&tsd_boot_wrapper.val);
- wrapper->initialized = false;
- JEMALLOC_DIAGNOSTIC_PUSH
- JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
- tsd_t initializer = TSD_INITIALIZER;
- JEMALLOC_DIAGNOSTIC_POP
- wrapper->val = initializer;
- tsd_wrapper_set(wrapper);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- if (tsd_boot0()) {
- return true;
- }
- tsd_boot1();
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return true;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(init);
- if (tsd_get_allocates() && !init && wrapper == NULL) {
- return NULL;
- }
- return &wrapper->val;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(true);
- if (likely(&wrapper->val != val)) {
- wrapper->val = *(val);
- }
- wrapper->initialized = true;
-}
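The generic backend above keeps the tsd in a heap-allocated wrapper tracked through a pthread key, so the destructor can run (and potentially re-register itself) at thread exit. A standalone sketch of that wrapper pattern using the pthread API directly (the "demo_" names and the long stand-in field are invented; the real code also handles the pre-boot and recursion cases shown above):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int initialized;
    long val;   /* Stand-in for tsd_t. */
} wrapper_t;

static pthread_key_t demo_key;

static void
demo_cleanup(void *arg) {
    wrapper_t *wrapper = arg;
    if (wrapper->initialized) {
        wrapper->initialized = 0;
        /* jemalloc runs tsd_cleanup() here and may re-register the
         * wrapper for another cleanup round before freeing it. */
    }
    free(wrapper);
}

static wrapper_t *
demo_get(void) {
    wrapper_t *wrapper = pthread_getspecific(demo_key);
    if (wrapper == NULL) {
        wrapper = calloc(1, sizeof(*wrapper));
        if (wrapper == NULL || pthread_setspecific(demo_key, wrapper) != 0) {
            abort();
        }
        wrapper->initialized = 1;
    }
    return wrapper;
}

int main(void) {
    if (pthread_key_create(&demo_key, demo_cleanup) != 0) {
        return 1;
    }
    demo_get()->val = 42;
    printf("%ld\n", demo_get()->val);
    return 0;
}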
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
deleted file mode 100644
index d8f3ef13..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
-
-#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
-
-extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
-extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE bool
-tsd_cleanup_wrapper(void) {
- if (tsd_initialized) {
- tsd_initialized = false;
- tsd_cleanup(&tsd_tls);
- }
- return tsd_initialized;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- /* Do nothing. */
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- return tsd_boot0();
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return false;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- return &tsd_tls;
-}
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- assert(tsd_booted);
- if (likely(&tsd_tls != val)) {
- tsd_tls = (*val);
- }
- tsd_initialized = true;
-}
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_tls.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_tls.h
deleted file mode 100644
index 7d6c805b..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_tls.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_TLS_H
-
-#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
-
-extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
-extern pthread_key_t tsd_tsd;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
- return true;
- }
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- /* Do nothing. */
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- return tsd_boot0();
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return false;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- return &tsd_tls;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- assert(tsd_booted);
- if (likely(&tsd_tls != val)) {
- tsd_tls = (*val);
- }
- if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
- malloc_write("<jemalloc>: Error setting tsd.\n");
- if (opt_abort) {
- abort();
- }
- }
-}
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_types.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_types.h
deleted file mode 100644
index a6ae37da..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_types.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
-#define JEMALLOC_INTERNAL_TSD_TYPES_H
-
-#define MALLOC_TSD_CLEANUPS_MAX 4
-
-typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-typedef bool (*malloc_tsd_cleanup_t)(void);
-
-#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_win.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_win.h
deleted file mode 100644
index a91dac88..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/tsd_win.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
-#error This file should be included only once, by tsd.h.
-#endif
-#define JEMALLOC_INTERNAL_TSD_WIN_H
-
-typedef struct {
- bool initialized;
- tsd_t val;
-} tsd_wrapper_t;
-
-extern DWORD tsd_tsd;
-extern tsd_wrapper_t tsd_boot_wrapper;
-extern bool tsd_booted;
-
-/* Initialization/cleanup. */
-JEMALLOC_ALWAYS_INLINE bool
-tsd_cleanup_wrapper(void) {
- DWORD error = GetLastError();
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
- SetLastError(error);
-
- if (wrapper == NULL) {
- return false;
- }
-
- if (wrapper->initialized) {
- wrapper->initialized = false;
- tsd_cleanup(&wrapper->val);
- if (wrapper->initialized) {
- /* Trigger another cleanup round. */
- return true;
- }
- }
- malloc_tsd_dalloc(wrapper);
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
- if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
- malloc_write("<jemalloc>: Error setting TSD\n");
- abort();
- }
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
-tsd_wrapper_get(bool init) {
- DWORD error = GetLastError();
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
- SetLastError(error);
-
- if (init && unlikely(wrapper == NULL)) {
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- } else {
- wrapper->initialized = false;
- /* MSVC is finicky about aggregate initialization. */
- tsd_t tsd_initializer = TSD_INITIALIZER;
- wrapper->val = tsd_initializer;
- }
- tsd_wrapper_set(wrapper);
- }
- return wrapper;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot0(void) {
- tsd_tsd = TlsAlloc();
- if (tsd_tsd == TLS_OUT_OF_INDEXES) {
- return true;
- }
- _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
- tsd_wrapper_set(&tsd_boot_wrapper);
- tsd_booted = true;
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_boot1(void) {
- tsd_wrapper_t *wrapper;
- wrapper = (tsd_wrapper_t *)
- malloc_tsd_malloc(sizeof(tsd_wrapper_t));
- if (wrapper == NULL) {
- malloc_write("<jemalloc>: Error allocating TSD\n");
- abort();
- }
- tsd_boot_wrapper.initialized = false;
- tsd_cleanup(&tsd_boot_wrapper.val);
- wrapper->initialized = false;
- tsd_t initializer = TSD_INITIALIZER;
- wrapper->val = initializer;
- tsd_wrapper_set(wrapper);
-}
-JEMALLOC_ALWAYS_INLINE bool
-tsd_boot(void) {
- if (tsd_boot0()) {
- return true;
- }
- tsd_boot1();
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_booted_get(void) {
- return tsd_booted;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsd_get_allocates(void) {
- return true;
-}
-
-/* Get/set. */
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(bool init) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(init);
- if (tsd_get_allocates() && !init && wrapper == NULL) {
- return NULL;
- }
- return &wrapper->val;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-tsd_set(tsd_t *val) {
- tsd_wrapper_t *wrapper;
-
- assert(tsd_booted);
- wrapper = tsd_wrapper_get(true);
- if (likely(&wrapper->val != val)) {
- wrapper->val = *(val);
- }
- wrapper->initialized = true;
-}
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/typed_list.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/typed_list.h
deleted file mode 100644
index 6535055a..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/typed_list.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H
-#define JEMALLOC_INTERNAL_TYPED_LIST_H
-
-/*
- * This wraps the ql module to implement a list class in a way that's a little
- * bit easier to use; it handles ql_elm_new calls and provides type safety.
- */
-
-#define TYPED_LIST(list_type, el_type, linkage) \
-typedef struct { \
- ql_head(el_type) head; \
-} list_type##_t; \
-static inline void \
-list_type##_init(list_type##_t *list) { \
- ql_new(&list->head); \
-} \
-static inline el_type * \
-list_type##_first(const list_type##_t *list) { \
- return ql_first(&list->head); \
-} \
-static inline el_type * \
-list_type##_last(const list_type##_t *list) { \
- return ql_last(&list->head, linkage); \
-} \
-static inline void \
-list_type##_append(list_type##_t *list, el_type *item) { \
- ql_elm_new(item, linkage); \
- ql_tail_insert(&list->head, item, linkage); \
-} \
-static inline void \
-list_type##_prepend(list_type##_t *list, el_type *item) { \
- ql_elm_new(item, linkage); \
- ql_head_insert(&list->head, item, linkage); \
-} \
-static inline void \
-list_type##_replace(list_type##_t *list, el_type *to_remove, \
- el_type *to_insert) { \
- ql_elm_new(to_insert, linkage); \
- ql_after_insert(to_remove, to_insert, linkage); \
- ql_remove(&list->head, to_remove, linkage); \
-} \
-static inline void \
-list_type##_remove(list_type##_t *list, el_type *item) { \
- ql_remove(&list->head, item, linkage); \
-} \
-static inline bool \
-list_type##_empty(list_type##_t *list) { \
- return ql_empty(&list->head); \
-} \
-static inline void \
-list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \
- ql_concat(&list_a->head, &list_b->head, linkage); \
-}
-
-#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */
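A hypothetical instantiation showing the intended usage (the widget names are invented for illustration, and ql.h plus the TYPED_LIST macro above are assumed to be in scope): the element type embeds a ql_elm link, and the macro generates a type-safe list API around it.

typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;  /* Intrusive linkage consumed by the generated list. */
};

TYPED_LIST(widget_list, widget_t, link)

static void
widget_list_demo(widget_t *a, widget_t *b) {
    widget_list_t list;
    widget_list_init(&list);
    widget_list_append(&list, a);   /* ql_elm_new() + tail insert, handled internally. */
    widget_list_prepend(&list, b);  /* b is now first, a is last. */
    widget_list_remove(&list, b);
    /* Only a remains; widget_list_empty(&list) is false here. */
}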
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/util.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/util.h
deleted file mode 100644
index dcb1c0a5..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/util.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_UTIL_H
-#define JEMALLOC_INTERNAL_UTIL_H
-
-#define UTIL_INLINE static inline
-
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
-#endif
-
-/*
- * Wrap a cpp argument that contains commas such that it isn't broken up into
- * multiple arguments.
- */
-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
-
-/* cpp macro definition stringification. */
-#define STRINGIFY_HELPER(x) #x
-#define STRINGIFY(x) STRINGIFY_HELPER(x)
-
-/*
- * Silence compiler warnings due to uninitialized values. This is used
- * wherever the compiler fails to recognize that the variable is never used
- * uninitialized.
- */
-#define JEMALLOC_CC_SILENCE_INIT(v) = v
-
-#ifdef __GNUC__
-# define likely(x) __builtin_expect(!!(x), 1)
-# define unlikely(x) __builtin_expect(!!(x), 0)
-#else
-# define likely(x) !!(x)
-# define unlikely(x) !!(x)
-#endif
-
-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
-#endif
-
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
-
-/* Set error code. */
-UTIL_INLINE void
-set_errno(int errnum) {
-#ifdef _WIN32
- SetLastError(errnum);
-#else
- errno = errnum;
-#endif
-}
-
-/* Get last error code. */
-UTIL_INLINE int
-get_errno(void) {
-#ifdef _WIN32
- return GetLastError();
-#else
- return errno;
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-util_assume(bool b) {
- if (!b) {
- unreachable();
- }
-}
-
-/* ptr should be valid. */
-JEMALLOC_ALWAYS_INLINE void
-util_prefetch_read(void *ptr) {
- /*
- * This should arguably be a config check; but any version of GCC so old
- * that it doesn't support __builtin_prefetch is also too old to build
- * jemalloc.
- */
-#ifdef __GNUC__
- if (config_debug) {
- /* Enforce the "valid ptr" requirement. */
- *(volatile char *)ptr;
- }
- __builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3);
-#else
- *(volatile char *)ptr;
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-util_prefetch_write(void *ptr) {
-#ifdef __GNUC__
- if (config_debug) {
- *(volatile char *)ptr;
- }
- /*
- * The only difference from the read variant is that this has a 1 as the
- * second argument (the write hint).
- */
- __builtin_prefetch(ptr, 1, 3);
-#else
- *(volatile char *)ptr;
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE void
-util_prefetch_read_range(void *ptr, size_t sz) {
- for (size_t i = 0; i < sz; i += CACHELINE) {
- util_prefetch_read((void *)((uintptr_t)ptr + i));
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-util_prefetch_write_range(void *ptr, size_t sz) {
- for (size_t i = 0; i < sz; i += CACHELINE) {
- util_prefetch_write((void *)((uintptr_t)ptr + i));
- }
-}
-
-#undef UTIL_INLINE
-
-#endif /* JEMALLOC_INTERNAL_UTIL_H */
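For reference, a minimal sketch of the util.h helpers deleted above (buf and len are hypothetical): STRINGIFY() turns a macro's expansion into a string literal, set_errno()/get_errno() abstract errno versus GetLastError(), and the prefetch range helpers walk a buffer one CACHELINE at a time.

static void
util_demo(void *buf, size_t len) {
	/* Expands and stringifies to "((uint8_t)0xa5)". */
	const char *junk_str = STRINGIFY(JEMALLOC_ALLOC_JUNK);
	(void)junk_str;

	/* Hint to the CPU that [buf, buf+len) is about to be read. */
	util_prefetch_read_range(buf, len);

	set_errno(0);
	assert(get_errno() == 0);
}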
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/witness.h b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/witness.h
deleted file mode 100644
index e81b9a00..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/internal/witness.h
+++ /dev/null
@@ -1,378 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_WITNESS_H
-#define JEMALLOC_INTERNAL_WITNESS_H
-
-#include "jemalloc/internal/ql.h"
-
-/******************************************************************************/
-/* LOCK RANKS */
-/******************************************************************************/
-
-enum witness_rank_e {
- /*
- * Order matters within this enum listing -- higher valued locks can
- * only be acquired after lower-valued ones. We use the
- * auto-incrementing-ness of enum values to enforce this.
- */
-
- /*
- * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the
- * witness machinery.
- */
- WITNESS_RANK_OMIT,
- WITNESS_RANK_MIN,
- WITNESS_RANK_INIT = WITNESS_RANK_MIN,
- WITNESS_RANK_CTL,
- WITNESS_RANK_TCACHES,
- WITNESS_RANK_ARENAS,
- WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
- WITNESS_RANK_PROF_DUMP,
- WITNESS_RANK_PROF_BT2GCTX,
- WITNESS_RANK_PROF_TDATAS,
- WITNESS_RANK_PROF_TDATA,
- WITNESS_RANK_PROF_LOG,
- WITNESS_RANK_PROF_GCTX,
- WITNESS_RANK_PROF_RECENT_DUMP,
- WITNESS_RANK_BACKGROUND_THREAD,
- /*
- * Used as an argument to witness_assert_depth_to_rank() in order to
- * validate depth excluding non-core locks with lower ranks. Since the
- * rank argument to witness_assert_depth_to_rank() is inclusive rather
- * than exclusive, this definition can have the same value as the
- * minimally ranked core lock.
- */
- WITNESS_RANK_CORE,
- WITNESS_RANK_DECAY = WITNESS_RANK_CORE,
- WITNESS_RANK_TCACHE_QL,
-
- WITNESS_RANK_SEC_SHARD,
-
- WITNESS_RANK_EXTENT_GROW,
- WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW,
- WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW,
-
- WITNESS_RANK_EXTENTS,
- WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,
-
- WITNESS_RANK_HPA_CENTRAL_GROW,
- WITNESS_RANK_HPA_CENTRAL,
-
- WITNESS_RANK_EDATA_CACHE,
-
- WITNESS_RANK_RTREE,
- WITNESS_RANK_BASE,
- WITNESS_RANK_ARENA_LARGE,
- WITNESS_RANK_HOOK,
-
- WITNESS_RANK_LEAF=0x1000,
- WITNESS_RANK_BIN = WITNESS_RANK_LEAF,
- WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF,
- WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF,
- WITNESS_RANK_DSS = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF,
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF,
-};
-typedef enum witness_rank_e witness_rank_t;
-
-/******************************************************************************/
-/* PER-WITNESS DATA */
-/******************************************************************************/
-#if defined(JEMALLOC_DEBUG)
-# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
-#else
-# define WITNESS_INITIALIZER(name, rank)
-#endif
-
-typedef struct witness_s witness_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
- void *);
-
-struct witness_s {
- /* Name, used for printing lock order reversal messages. */
- const char *name;
-
- /*
- * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest.
- * Witnesses must be acquired in order of increasing rank.
- */
- witness_rank_t rank;
-
- /*
- * If two witnesses are of equal rank and they have the same comp
- * function pointer, it is called as a last attempt to differentiate
- * between witnesses of equal rank.
- */
- witness_comp_t *comp;
-
- /* Opaque data, passed to comp(). */
- void *opaque;
-
- /* Linkage for thread's currently owned locks. */
- ql_elm(witness_t) link;
-};
-
-/******************************************************************************/
-/* PER-THREAD DATA */
-/******************************************************************************/
-typedef struct witness_tsd_s witness_tsd_t;
-struct witness_tsd_s {
- witness_list_t witnesses;
- bool forking;
-};
-
-#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
-#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
-
-/******************************************************************************/
-/* (PER-THREAD) NULLABILITY HELPERS */
-/******************************************************************************/
-typedef struct witness_tsdn_s witness_tsdn_t;
-struct witness_tsdn_s {
- witness_tsd_t witness_tsd;
-};
-
-JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
-witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
- return (witness_tsdn_t *)witness_tsd;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
- return witness_tsdn == NULL;
-}
-
-JEMALLOC_ALWAYS_INLINE witness_tsd_t *
-witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
- assert(!witness_tsdn_null(witness_tsdn));
- return &witness_tsdn->witness_tsd;
-}
-
-/******************************************************************************/
-/* API */
-/******************************************************************************/
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp, void *opaque);
-
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
-
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
-
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
-
-typedef void (witness_depth_error_t)(const witness_list_t *,
- witness_rank_t rank_inclusive, unsigned depth);
-extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
-
-void witnesses_cleanup(witness_tsd_t *witness_tsd);
-void witness_prefork(witness_tsd_t *witness_tsd);
-void witness_postfork_parent(witness_tsd_t *witness_tsd);
-void witness_postfork_child(witness_tsd_t *witness_tsd);
-
-/* Helper, not intended for direct use. */
-static inline bool
-witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
- witness_list_t *witnesses;
- witness_t *w;
-
- cassert(config_debug);
-
- witnesses = &witness_tsd->witnesses;
- ql_foreach(w, witnesses, link) {
- if (w == witness) {
- return true;
- }
- }
-
- return false;
-}
-
-static inline void
-witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
- witness_tsd_t *witness_tsd;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- if (witness_owner(witness_tsd, witness)) {
- return;
- }
- witness_owner_error(witness);
-}
-
-static inline void
-witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
- const witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- witnesses = &witness_tsd->witnesses;
- ql_foreach(w, witnesses, link) {
- if (w == witness) {
- witness_not_owner_error(witness);
- }
- }
-}
-
-/* Returns depth. Not intended for direct use. */
-static inline unsigned
-witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive)
-{
- unsigned d = 0;
- witness_t *w = ql_last(witnesses, link);
-
- if (w != NULL) {
- ql_reverse_foreach(w, witnesses, link) {
- if (w->rank < rank_inclusive) {
- break;
- }
- d++;
- }
- }
-
- return d;
-}
-
-static inline void
-witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
- witness_rank_t rank_inclusive, unsigned depth) {
- if (!config_debug || witness_tsdn_null(witness_tsdn)) {
- return;
- }
-
- witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
- unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
-
- if (d != depth) {
- witness_depth_error(witnesses, rank_inclusive, depth);
- }
-}
-
-static inline void
-witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
- witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
-}
-
-static inline void
-witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
- witness_assert_depth(witness_tsdn, 0);
-}
-
-static inline void
-witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn,
- witness_rank_t rank_inclusive) {
- if (!config_debug || witness_tsdn_null(witness_tsdn)) {
- return;
- }
-
- witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
- unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
-
- if (d == 0) {
- witness_depth_error(witnesses, rank_inclusive, 1);
- }
-}
-
-static inline void
-witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- witness_assert_not_owner(witness_tsdn, witness);
-
- witnesses = &witness_tsd->witnesses;
- w = ql_last(witnesses, link);
- if (w == NULL) {
- /* No other locks; do nothing. */
- } else if (witness_tsd->forking && w->rank <= witness->rank) {
- /* Forking, and relaxed ranking satisfied. */
- } else if (w->rank > witness->rank) {
- /* Not forking, rank order reversal. */
- witness_lock_error(witnesses, witness);
- } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
- witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
- 0)) {
- /*
- * Missing/incompatible comparison function, or comparison
- * function indicates rank order reversal.
- */
- witness_lock_error(witnesses, witness);
- }
-
- ql_elm_new(witness, link);
- ql_tail_insert(witnesses, witness, link);
-}
-
-static inline void
-witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
- witness_tsd_t *witness_tsd;
- witness_list_t *witnesses;
-
- if (!config_debug) {
- return;
- }
-
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
- if (witness->rank == WITNESS_RANK_OMIT) {
- return;
- }
-
- /*
- * Check whether owner before removal, rather than relying on
- * witness_assert_owner() to abort, so that unit tests can test this
- * function's failure mode without causing undefined behavior.
- */
- if (witness_owner(witness_tsd, witness)) {
- witnesses = &witness_tsd->witnesses;
- ql_remove(witnesses, witness, link);
- } else {
- witness_assert_owner(witness_tsdn, witness);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_WITNESS_H */
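For reference, a minimal sketch of how a lock wrapper would drive the witness API deleted above (the demo_mtx_* names are hypothetical; the pattern mirrors what jemalloc's own mutex code does): each lock carries a witness with a rank, and debug builds bracket the real lock/unlock with witness_lock()/witness_unlock() so that rank-order reversals are reported.

#include <pthread.h>

typedef struct demo_mtx_s {
	pthread_mutex_t	lock;
	witness_t	witness;
} demo_mtx_t;

static void
demo_mtx_init(demo_mtx_t *m, const char *name, witness_rank_t rank) {
	pthread_mutex_init(&m->lock, NULL);
	witness_init(&m->witness, name, rank, NULL, NULL);
}

static void
demo_mtx_lock(witness_tsdn_t *witness_tsdn, demo_mtx_t *m) {
	witness_assert_not_owner(witness_tsdn, &m->witness);
	pthread_mutex_lock(&m->lock);
	/* Records ownership and checks rank order against held locks. */
	witness_lock(witness_tsdn, &m->witness);
}

static void
demo_mtx_unlock(witness_tsdn_t *witness_tsdn, demo_mtx_t *m) {
	witness_unlock(witness_tsdn, &m->witness);
	pthread_mutex_unlock(&m->lock);
}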
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc.sh
deleted file mode 100755
index b19b1548..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-objroot=$1
-
-cat <<EOF
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-EOF
-
-for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
- jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
- cat "${objroot}include/jemalloc/${hdr}" \
- | grep -v 'Generated from .* by configure\.' \
- | sed -e 's/ $//g'
- echo
-done
-
-cat <<EOF
-#ifdef __cplusplus
-}
-#endif
-#endif /* JEMALLOC_H_ */
-EOF
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_defs.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_defs.h.in
deleted file mode 100644
index cbe2fca6..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_defs.h.in
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Defined if __attribute__((...)) syntax is supported. */
-#undef JEMALLOC_HAVE_ATTR
-
-/* Defined if alloc_size attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-
-/* Defined if format_arg(...) attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FORMAT_ARG
-
-/* Defined if format(gnu_printf, ...) attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-
-/* Defined if format(printf, ...) attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
-
-/* Defined if fallthrough attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_FALLTHROUGH
-
-/* Defined if cold attribute is supported. */
-#undef JEMALLOC_HAVE_ATTR_COLD
-
-/*
- * Define overrides for non-standard allocator-related functions if they are
- * present on the system.
- */
-#undef JEMALLOC_OVERRIDE_MEMALIGN
-#undef JEMALLOC_OVERRIDE_VALLOC
-
-/*
- * At least Linux omits the "const" in:
- *
- * size_t malloc_usable_size(const void *ptr);
- *
- * Match the operating system's prototype.
- */
-#undef JEMALLOC_USABLE_SIZE_CONST
-
-/*
- * If defined, specify throw() for the public function prototypes when compiling
- * with C++. The only justification for this is to match the prototypes that
- * glibc defines.
- */
-#undef JEMALLOC_USE_CXX_THROW
-
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
-#undef LG_SIZEOF_PTR
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_macros.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_macros.h.in
deleted file mode 100644
index ebb3137e..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_macros.h.in
+++ /dev/null
@@ -1,149 +0,0 @@
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "@jemalloc_version@"
-#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
-#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
-#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
-#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
-#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
-#define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@
-
-#define MALLOCX_LG_ALIGN(la) ((int)(la))
-#if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
-#else
-# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
-#endif
-#define MALLOCX_ZERO ((int)0x40)
-/*
- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
- * encodes MALLOCX_TCACHE_NONE.
- */
-#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
-#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
-/*
- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
- */
-#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
-
-/*
- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
- * definition is intentionally specified in raw decimal format to support
- * cpp-based string concatenation, e.g.
- *
- * #define STRINGIFY_HELPER(x) #x
- * #define STRINGIFY(x) STRINGIFY_HELPER(x)
- *
- * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
- * 0);
- */
-#define MALLCTL_ARENAS_ALL 4096
-/*
- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
- * destroyed arenas.
- */
-#define MALLCTL_ARENAS_DESTROYED 4097
-
-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
-# define JEMALLOC_CXX_THROW throw()
-#else
-# define JEMALLOC_CXX_THROW
-#endif
-
-#if defined(_MSC_VER)
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# ifndef JEMALLOC_EXPORT
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
-# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
-# endif
-# endif
-# define JEMALLOC_FORMAT_ARG(i)
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_FALLTHROUGH
-# define JEMALLOC_NOINLINE __declspec(noinline)
-# ifdef __cplusplus
-# define JEMALLOC_NOTHROW __declspec(nothrow)
-# else
-# define JEMALLOC_NOTHROW
-# endif
-# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
-# if _MSC_VER >= 1900 && !defined(__EDG__)
-# define JEMALLOC_ALLOCATOR __declspec(allocator)
-# else
-# define JEMALLOC_ALLOCATOR
-# endif
-# define JEMALLOC_COLD
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
-# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
-# else
-# define JEMALLOC_FORMAT_ARG(i)
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
-# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
-# else
-# define JEMALLOC_FALLTHROUGH
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-# ifdef JEMALLOC_HAVE_ATTR_COLD
-# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
-# else
-# define JEMALLOC_COLD
-# endif
-#else
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# define JEMALLOC_EXPORT
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# define JEMALLOC_FALLTHROUGH
-# define JEMALLOC_NOINLINE
-# define JEMALLOC_NOTHROW
-# define JEMALLOC_SECTION(s)
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
-# define JEMALLOC_COLD
-#endif
-
-#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
-# define JEMALLOC_SYS_NOTHROW
-#else
-# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
-#endif
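For reference, a minimal sketch of how the MALLOCX_* flag macros above compose into the flags argument of mallocx()/dallocx(), and how MALLCTL_ARENAS_ALL is stringified into a mallctl() name (the STRINGIFY helpers are spelled out as in the comment above; the 64-byte alignment is assumed purely for illustration):

#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

static void
flags_demo(void) {
	/* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE_NONE);
	}

	/* Purge unused dirty pages in every arena. */
	mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge",
	    NULL, NULL, NULL, 0);
}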
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_mangle.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_mangle.sh
deleted file mode 100755
index c675bb46..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_mangle.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh -eu
-
-public_symbols_txt=$1
-symbol_prefix=$2
-
-cat <<EOF
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-# ifndef JEMALLOC_NO_DEMANGLE
-# define JEMALLOC_NO_DEMANGLE
-# endif
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
- n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
- echo "# define ${n} ${symbol_prefix}${n}"
-done
-
-cat <<EOF
-#endif
-
-/*
- * The ${symbol_prefix}* macros can be used as stable alternative names for the
- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
- * meant for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
- n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
- echo "# undef ${symbol_prefix}${n}"
-done
-
-cat <<EOF
-#endif
-EOF
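For reference, a minimal sketch of what the generated mangling header enables: defining JEMALLOC_MANGLE before including jemalloc.h maps the standard API names onto the prefixed symbols of a --with-mangling/--with-jemalloc-prefix build, while JEMALLOC_NO_DEMANGLE leaves only the stable prefixed names available.

#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>

static void *
mangle_demo(size_t sz) {
	/*
	 * With mangling enabled, this call resolves to the prefixed jemalloc
	 * symbol rather than whatever allocator the platform links by default.
	 */
	return malloc(sz);
}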
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_protos.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_protos.h.in
deleted file mode 100644
index 356221cc..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_protos.h.in
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * The @je_@ prefix on the following public symbol declarations is an artifact
- * of namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
- */
-extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
-extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
- const char *s);
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@malloc(size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@calloc(size_t num, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign(
- void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
- JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@aligned_alloc(size_t alignment,
- size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
- JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@realloc(void *ptr, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW @je_@free(void *ptr)
- JEMALLOC_CXX_THROW;
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *@je_@mallocx(size_t size, int flags)
- JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *@je_@rallocx(void *ptr, size_t size,
- int flags) JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@xallocx(void *ptr, size_t size,
- size_t extra, int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@sallocx(const void *ptr,
- int flags) JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@sdallocx(void *ptr, size_t size,
- int flags);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@nallocx(size_t size, int flags)
- JEMALLOC_ATTR(pure);
-
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctl(const char *name,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlnametomib(const char *name,
- size_t *mibp, size_t *miblenp);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlbymib(const size_t *mib,
- size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
- void (*write_cb)(void *, const char *), void *@je_@cbopaque,
- const char *opts);
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
-#ifdef JEMALLOC_HAVE_MALLOC_SIZE
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_size(
- const void *ptr);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@memalign(size_t alignment, size_t size)
- JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_SYS_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
- JEMALLOC_ATTR(malloc);
-#endif
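For reference, a minimal sketch exercising a few of the prototypes above through their unprefixed names (i.e. a default build where jemalloc_rename.h strips the je_ prefix): read the allocator version via mallctl() and query the usable size of an allocation.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
protos_demo(void) {
	const char *version;
	size_t len = sizeof(version);
	if (mallctl("version", &version, &len, NULL, 0) == 0) {
		printf("jemalloc %s\n", version);
	}

	void *p = malloc(100);
	if (p != NULL) {
		printf("usable size: %zu\n", malloc_usable_size(p));
		free(p);
	}
}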
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_rename.sh b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_rename.sh
deleted file mode 100755
index f9438912..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_rename.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-public_symbols_txt=$1
-
-cat <<EOF
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#ifndef JEMALLOC_NO_RENAME
-EOF
-
-for nm in `cat ${public_symbols_txt}` ; do
- n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
- m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
- echo "# define je_${n} ${m}"
-done
-
-cat <<EOF
-#endif
-EOF
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_typedefs.h.in b/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_typedefs.h.in
deleted file mode 100644
index 1a588743..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/jemalloc/jemalloc_typedefs.h.in
+++ /dev/null
@@ -1,77 +0,0 @@
-typedef struct extent_hooks_s extent_hooks_t;
-
-/*
- * void *
- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
- */
-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
- bool *, unsigned);
-
-/*
- * bool
- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * void
- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * bool committed, unsigned arena_ind);
- */
-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
- unsigned);
-
-/*
- * bool
- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
- size_t, unsigned);
-
-/*
- * bool
- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t offset, size_t length, unsigned arena_ind);
- */
-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- unsigned);
-
-/*
- * bool
- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
- * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
- bool, unsigned);
-
-/*
- * bool
- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
- */
-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
- bool, unsigned);
-
-struct extent_hooks_s {
- extent_alloc_t *alloc;
- extent_dalloc_t *dalloc;
- extent_destroy_t *destroy;
- extent_commit_t *commit;
- extent_decommit_t *decommit;
- extent_purge_t *purge_lazy;
- extent_purge_t *purge_forced;
- extent_split_t *split;
- extent_merge_t *merge;
-};
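For reference, a minimal sketch of a custom extent_hooks_t built against the typedefs above (my_extent_alloc and my_hooks are hypothetical names, and POSIX mmap() is assumed). Only the alloc hook is spelled out; a real hook table must also honor alignments larger than a page and provide the remaining callbacks per the jemalloc manual, and it would be installed through the "arena.<i>.extent_hooks" mallctl.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

static void *
my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* Sketch only: assumes alignment <= page size. */
	void *ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED) {
		return NULL;
	}
	*zero = true;	/* fresh anonymous mappings are zero-filled */
	*commit = true;
	return ret;
}

static extent_hooks_t my_hooks = {
	.alloc = my_extent_alloc,
	/* dalloc, destroy, commit, decommit, purge_*, split, merge elided. */
};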
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdbool.h b/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdbool.h
deleted file mode 100644
index d92160eb..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdbool.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef stdbool_h
-#define stdbool_h
-
-#include <wtypes.h>
-
-/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
-/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
-/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
- * a built-in type. */
-#ifndef __clang__
-typedef BOOL _Bool;
-#endif
-
-#define bool _Bool
-#define true 1
-#define false 0
-
-#define __bool_true_false_are_defined 1
-
-#endif /* stdbool_h */
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdint.h b/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdint.h
deleted file mode 100644
index d02608a5..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/C99/stdint.h
+++ /dev/null
@@ -1,247 +0,0 @@
-// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2008 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. The name of the author may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_STDINT_H_ // [
-#define _MSC_STDINT_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include <limits.h>
-
-// For Visual Studio 6 in C++ mode, and for many Visual Studio versions when
-// compiling for ARM, we should wrap the <wchar.h> include with 'extern "C++" {}'
-// or the compiler emits many errors like this:
-//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
-#ifdef __cplusplus
-extern "C" {
-#endif
-# include <wchar.h>
-#ifdef __cplusplus
-}
-#endif
-
-// Define _W64 macros to mark types changing their size, like intptr_t.
-#ifndef _W64
-# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
-# define _W64 __w64
-# else
-# define _W64
-# endif
-#endif
-
-
-// 7.18.1 Integer types
-
-// 7.18.1.1 Exact-width integer types
-
-// Visual Studio 6 and Embedded Visual C++ 4 don't
-// realize that, e.g., char has the same size as __int8,
-// so we give up on __intX for them.
-#if (_MSC_VER < 1300)
- typedef signed char int8_t;
- typedef signed short int16_t;
- typedef signed int int32_t;
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
-#else
- typedef signed __int8 int8_t;
- typedef signed __int16 int16_t;
- typedef signed __int32 int32_t;
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int16 uint16_t;
- typedef unsigned __int32 uint32_t;
-#endif
-typedef signed __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-
-
-// 7.18.1.2 Minimum-width integer types
-typedef int8_t int_least8_t;
-typedef int16_t int_least16_t;
-typedef int32_t int_least32_t;
-typedef int64_t int_least64_t;
-typedef uint8_t uint_least8_t;
-typedef uint16_t uint_least16_t;
-typedef uint32_t uint_least32_t;
-typedef uint64_t uint_least64_t;
-
-// 7.18.1.3 Fastest minimum-width integer types
-typedef int8_t int_fast8_t;
-typedef int16_t int_fast16_t;
-typedef int32_t int_fast32_t;
-typedef int64_t int_fast64_t;
-typedef uint8_t uint_fast8_t;
-typedef uint16_t uint_fast16_t;
-typedef uint32_t uint_fast32_t;
-typedef uint64_t uint_fast64_t;
-
-// 7.18.1.4 Integer types capable of holding object pointers
-#ifdef _WIN64 // [
- typedef signed __int64 intptr_t;
- typedef unsigned __int64 uintptr_t;
-#else // _WIN64 ][
- typedef _W64 signed int intptr_t;
- typedef _W64 unsigned int uintptr_t;
-#endif // _WIN64 ]
-
-// 7.18.1.5 Greatest-width integer types
-typedef int64_t intmax_t;
-typedef uint64_t uintmax_t;
-
-
-// 7.18.2 Limits of specified-width integer types
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
-
-// 7.18.2.1 Limits of exact-width integer types
-#define INT8_MIN ((int8_t)_I8_MIN)
-#define INT8_MAX _I8_MAX
-#define INT16_MIN ((int16_t)_I16_MIN)
-#define INT16_MAX _I16_MAX
-#define INT32_MIN ((int32_t)_I32_MIN)
-#define INT32_MAX _I32_MAX
-#define INT64_MIN ((int64_t)_I64_MIN)
-#define INT64_MAX _I64_MAX
-#define UINT8_MAX _UI8_MAX
-#define UINT16_MAX _UI16_MAX
-#define UINT32_MAX _UI32_MAX
-#define UINT64_MAX _UI64_MAX
-
-// 7.18.2.2 Limits of minimum-width integer types
-#define INT_LEAST8_MIN INT8_MIN
-#define INT_LEAST8_MAX INT8_MAX
-#define INT_LEAST16_MIN INT16_MIN
-#define INT_LEAST16_MAX INT16_MAX
-#define INT_LEAST32_MIN INT32_MIN
-#define INT_LEAST32_MAX INT32_MAX
-#define INT_LEAST64_MIN INT64_MIN
-#define INT_LEAST64_MAX INT64_MAX
-#define UINT_LEAST8_MAX UINT8_MAX
-#define UINT_LEAST16_MAX UINT16_MAX
-#define UINT_LEAST32_MAX UINT32_MAX
-#define UINT_LEAST64_MAX UINT64_MAX
-
-// 7.18.2.3 Limits of fastest minimum-width integer types
-#define INT_FAST8_MIN INT8_MIN
-#define INT_FAST8_MAX INT8_MAX
-#define INT_FAST16_MIN INT16_MIN
-#define INT_FAST16_MAX INT16_MAX
-#define INT_FAST32_MIN INT32_MIN
-#define INT_FAST32_MAX INT32_MAX
-#define INT_FAST64_MIN INT64_MIN
-#define INT_FAST64_MAX INT64_MAX
-#define UINT_FAST8_MAX UINT8_MAX
-#define UINT_FAST16_MAX UINT16_MAX
-#define UINT_FAST32_MAX UINT32_MAX
-#define UINT_FAST64_MAX UINT64_MAX
-
-// 7.18.2.4 Limits of integer types capable of holding object pointers
-#ifdef _WIN64 // [
-# define INTPTR_MIN INT64_MIN
-# define INTPTR_MAX INT64_MAX
-# define UINTPTR_MAX UINT64_MAX
-#else // _WIN64 ][
-# define INTPTR_MIN INT32_MIN
-# define INTPTR_MAX INT32_MAX
-# define UINTPTR_MAX UINT32_MAX
-#endif // _WIN64 ]
-
-// 7.18.2.5 Limits of greatest-width integer types
-#define INTMAX_MIN INT64_MIN
-#define INTMAX_MAX INT64_MAX
-#define UINTMAX_MAX UINT64_MAX
-
-// 7.18.3 Limits of other integer types
-
-#ifdef _WIN64 // [
-# define PTRDIFF_MIN _I64_MIN
-# define PTRDIFF_MAX _I64_MAX
-#else // _WIN64 ][
-# define PTRDIFF_MIN _I32_MIN
-# define PTRDIFF_MAX _I32_MAX
-#endif // _WIN64 ]
-
-#define SIG_ATOMIC_MIN INT_MIN
-#define SIG_ATOMIC_MAX INT_MAX
-
-#ifndef SIZE_MAX // [
-# ifdef _WIN64 // [
-# define SIZE_MAX _UI64_MAX
-# else // _WIN64 ][
-# define SIZE_MAX _UI32_MAX
-# endif // _WIN64 ]
-#endif // SIZE_MAX ]
-
-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
-#ifndef WCHAR_MIN // [
-# define WCHAR_MIN 0
-#endif // WCHAR_MIN ]
-#ifndef WCHAR_MAX // [
-# define WCHAR_MAX _UI16_MAX
-#endif // WCHAR_MAX ]
-
-#define WINT_MIN 0
-#define WINT_MAX _UI16_MAX
-
-#endif // __STDC_LIMIT_MACROS ]
-
-
-// 7.18.4 Limits of other integer types
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val) val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val) val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-#define INTMAX_C INT64_C
-#define UINTMAX_C UINT64_C
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-
-#endif // _MSC_STDINT_H_ ]
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/strings.h b/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/strings.h
deleted file mode 100644
index 996f256c..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/strings.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef strings_h
-#define strings_h
-
-/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
- * for both */
-#ifdef _MSC_VER
-# include <intrin.h>
-# pragma intrinsic(_BitScanForward)
-static __forceinline int ffsl(long x) {
- unsigned long i;
-
- if (_BitScanForward(&i, x)) {
- return i + 1;
- }
- return 0;
-}
-
-static __forceinline int ffs(int x) {
- return ffsl(x);
-}
-
-# ifdef _M_X64
-# pragma intrinsic(_BitScanForward64)
-# endif
-
-static __forceinline int ffsll(unsigned __int64 x) {
- unsigned long i;
-#ifdef _M_X64
- if (_BitScanForward64(&i, x)) {
- return i + 1;
- }
- return 0;
-#else
-// Fallback for 32-bit build where 64-bit version not available
-// assuming little endian
- union {
- unsigned __int64 ll;
- unsigned long l[2];
- } s;
-
- s.ll = x;
-
- if (_BitScanForward(&i, s.l[0])) {
- return i + 1;
- } else if(_BitScanForward(&i, s.l[1])) {
- return i + 33;
- }
- return 0;
-#endif
-}
-
-#else
-# define ffsll(x) __builtin_ffsll(x)
-# define ffsl(x) __builtin_ffsl(x)
-# define ffs(x) __builtin_ffs(x)
-#endif
-
-#endif /* strings_h */
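For reference, a quick check of the semantics the shim above preserves on both MSVC and GCC-style compilers (assuming this msvc_compat strings.h, or a platform providing the GNU ffsll extension, is on the include path): the return value is the 1-based index of the least significant set bit, or 0 when no bit is set.

#include <assert.h>
#include <strings.h>

static void
ffs_demo(void) {
	assert(ffs(0) == 0);			/* no bit set */
	assert(ffs(0x8) == 4);			/* bit 3 set -> index 4 */
	assert(ffsll(1ULL << 40) == 41);	/* works past 32 bits */
}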
diff --git a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/windows_extra.h b/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/windows_extra.h
deleted file mode 100644
index a6ebb930..00000000
--- a/fluent-bit/lib/jemalloc-5.3.0/include/msvc_compat/windows_extra.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
-#define MSVC_COMPAT_WINDOWS_EXTRA_H
-
-#include <errno.h>
-
-#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */